Merge Android 24Q1 Release (ab/11220357)

Bug: 319669529
Merged-In: I41020f4bded9e278edaf7f2842df7b8152289ea0
Change-Id: I72c966ba72fb7d1171a886b286fc97f4aaf3d43c
diff --git a/abseil-cpp/Android.bp b/abseil-cpp/Android.bp
index 0ab3734..5f76c15 100644
--- a/abseil-cpp/Android.bp
+++ b/abseil-cpp/Android.bp
@@ -32,29 +32,35 @@
 
 cc_library_static {
     name: "libtextclassifier_abseil",
-    export_include_dirs: ["."],
-    visibility: [
-        "//external/libtextclassifier:__subpackages__",
-        "//external/tflite-support:__subpackages__",
-    ],
     srcs: [
         "absl/**/*.cc",
     ],
+    exclude_srcs: [
+        "absl/**/*benchmark.cc",
+        "absl/**/*benchmarks.cc",
+        "absl/**/*_test.cc",
+        "absl/**/*_testing.cc",
+        "absl/base/spinlock_test_common.cc",
+        "absl/hash/internal/print_hash_of.cc",
+        "absl/log/internal/test_helpers.cc",
+        "absl/log/internal/test_matchers.cc",
+        "absl/log/scoped_mock_log.cc",
+        "absl/random/internal/gaussian_distribution_gentables.cc",
+    ],
+    export_include_dirs: ["."],
+    shared_libs: [
+        "liblog",
+    ],
+    stl: "libc++",
     apex_available: [
         "//apex_available:platform",
         "com.android.extservices",
         "com.android.adservices",
     ],
+    visibility: [
+        "//external/libtextclassifier:__subpackages__",
+        "//external/tflite-support:__subpackages__",
+    ],
     sdk_version: "current",
     min_sdk_version: "30",
-    stl: "libc++_static",
-    exclude_srcs: [
-        "**/*_test.cc",
-        "**/*_test_common.cc",
-        "**/*test_util.cc",
-        "**/*test_utils.cc",
-        "**/*benchmark.cc",
-        "absl/random/benchmarks.cc",
-        "absl/hash/internal/print_hash_of.cc",
-    ],
 }
diff --git a/abseil-cpp/CMake/AbseilDll.cmake b/abseil-cpp/CMake/AbseilDll.cmake
index cf6a8c9..f0d984a 100644
--- a/abseil-cpp/CMake/AbseilDll.cmake
+++ b/abseil-cpp/CMake/AbseilDll.cmake
@@ -1,4 +1,5 @@
 include(CMakeParseArguments)
+include(GNUInstallDirs)
 
 set(ABSL_INTERNAL_DLL_FILES
   "algorithm/algorithm.h"
@@ -10,15 +11,13 @@
   "base/const_init.h"
   "base/dynamic_annotations.h"
   "base/internal/atomic_hook.h"
-  "base/internal/bits.h"
   "base/internal/cycleclock.cc"
   "base/internal/cycleclock.h"
+  "base/internal/cycleclock_config.h"
   "base/internal/direct_mmap.h"
   "base/internal/dynamic_annotations.h"
   "base/internal/endian.h"
   "base/internal/errno_saver.h"
-  "base/internal/exponential_biased.cc"
-  "base/internal/exponential_biased.h"
   "base/internal/fast_type_id.h"
   "base/internal/hide_ptr.h"
   "base/internal/identity.h"
@@ -27,9 +26,10 @@
   "base/internal/low_level_alloc.cc"
   "base/internal/low_level_alloc.h"
   "base/internal/low_level_scheduling.h"
+  "base/internal/nullability_impl.h"
   "base/internal/per_thread_tls.h"
-  "base/internal/periodic_sampler.cc"
-  "base/internal/periodic_sampler.h"
+  "base/internal/prefetch.h"
+  "base/prefetch.h"
   "base/internal/pretty_function.h"
   "base/internal/raw_logging.cc"
   "base/internal/raw_logging.h"
@@ -53,14 +53,18 @@
   "base/internal/unaligned_access.h"
   "base/internal/unscaledcycleclock.cc"
   "base/internal/unscaledcycleclock.h"
+  "base/internal/unscaledcycleclock_config.h"
   "base/log_severity.cc"
   "base/log_severity.h"
   "base/macros.h"
+  "base/nullability.h"
   "base/optimization.h"
   "base/options.h"
   "base/policy_checks.h"
   "base/port.h"
   "base/thread_annotations.h"
+  "cleanup/cleanup.h"
+  "cleanup/internal/cleanup.h"
   "container/btree_map.h"
   "container/btree_set.h"
   "container/fixed_array.h"
@@ -70,6 +74,7 @@
   "container/internal/btree.h"
   "container/internal/btree_container.h"
   "container/internal/common.h"
+  "container/internal/common_policy_traits.h"
   "container/internal/compressed_tuple.h"
   "container/internal/container_memory.h"
   "container/internal/counting_allocator.h"
@@ -80,20 +85,38 @@
   "container/internal/hashtablez_sampler.cc"
   "container/internal/hashtablez_sampler.h"
   "container/internal/hashtablez_sampler_force_weak_definition.cc"
-  "container/internal/have_sse.h"
   "container/internal/inlined_vector.h"
   "container/internal/layout.h"
-  "container/internal/node_hash_policy.h"
+  "container/internal/node_slot_policy.h"
   "container/internal/raw_hash_map.h"
   "container/internal/raw_hash_set.cc"
   "container/internal/raw_hash_set.h"
   "container/internal/tracked.h"
   "container/node_hash_map.h"
   "container/node_hash_set.h"
+  "crc/crc32c.cc"
+  "crc/crc32c.h"
+  "crc/internal/cpu_detect.cc"
+  "crc/internal/cpu_detect.h"
+  "crc/internal/crc32c.h"
+  "crc/internal/crc32c_inline.h"
+  "crc/internal/crc32_x86_arm_combined_simd.h"
+  "crc/internal/crc.cc"
+  "crc/internal/crc.h"
+  "crc/internal/crc_cord_state.cc"
+  "crc/internal/crc_cord_state.h"
+  "crc/internal/crc_internal.h"
+  "crc/internal/crc_x86_arm_combined.cc"
+  "crc/internal/crc_memcpy_fallback.cc"
+  "crc/internal/crc_memcpy.h"
+  "crc/internal/crc_memcpy_x86_64.cc"
+  "crc/internal/crc_non_temporal_memcpy.cc"
+  "crc/internal/crc_x86_arm_combined.cc"
+  "crc/internal/non_temporal_arm_intrinsics.h"
+  "crc/internal/non_temporal_memcpy.h"
   "debugging/failure_signal_handler.cc"
   "debugging/failure_signal_handler.h"
   "debugging/leak_check.h"
-  "debugging/leak_check_disable.cc"
   "debugging/stacktrace.cc"
   "debugging/stacktrace.h"
   "debugging/symbolize.cc"
@@ -112,9 +135,11 @@
   "debugging/internal/symbolize.h"
   "debugging/internal/vdso_support.cc"
   "debugging/internal/vdso_support.h"
+  "functional/any_invocable.h"
   "functional/internal/front_binder.h"
   "functional/bind_front.h"
   "functional/function_ref.h"
+  "functional/internal/any_invocable.h"
   "functional/internal/function_ref.h"
   "hash/hash.h"
   "hash/internal/city.h"
@@ -122,10 +147,61 @@
   "hash/internal/hash.h"
   "hash/internal/hash.cc"
   "hash/internal/spy_hash_state.h"
+  "hash/internal/low_level_hash.h"
+  "hash/internal/low_level_hash.cc"
+  "log/absl_check.h"
+  "log/absl_log.h"
+  "log/check.h"
+  "log/die_if_null.cc"
+  "log/die_if_null.h"
+  "log/globals.cc"
+  "log/globals.h"
+  "log/internal/append_truncated.h"
+  "log/internal/check_impl.h"
+  "log/internal/check_op.cc"
+  "log/internal/check_op.h"
+  "log/internal/conditions.cc"
+  "log/internal/conditions.h"
+  "log/internal/config.h"
+  "log/internal/globals.cc"
+  "log/internal/globals.h"
+  "log/internal/log_format.cc"
+  "log/internal/log_format.h"
+  "log/internal/log_impl.h"
+  "log/internal/log_message.cc"
+  "log/internal/log_message.h"
+  "log/internal/log_sink_set.cc"
+  "log/internal/log_sink_set.h"
+  "log/internal/nullguard.cc"
+  "log/internal/nullguard.h"
+  "log/internal/nullstream.h"
+  "log/internal/proto.h"
+  "log/internal/proto.cc"
+  "log/internal/strip.h"
+  "log/internal/structured.h"
+  "log/internal/voidify.h"
+  "log/initialize.cc"
+  "log/initialize.h"
+  "log/log.h"
+  "log/log_entry.cc"
+  "log/log_entry.h"
+  "log/log_sink.cc"
+  "log/log_sink.h"
+  "log/log_sink_registry.h"
+  "log/log_streamer.h"
+  "log/structured.h"
   "memory/memory.h"
   "meta/type_traits.h"
+  "numeric/bits.h"
   "numeric/int128.cc"
   "numeric/int128.h"
+  "numeric/internal/bits.h"
+  "numeric/internal/representation.h"
+  "profiling/internal/exponential_biased.cc"
+  "profiling/internal/exponential_biased.h"
+  "profiling/internal/periodic_sampler.cc"
+  "profiling/internal/periodic_sampler.h"
+  "profiling/internal/sample_recorder.h"
   "random/bernoulli_distribution.h"
   "random/beta_distribution.h"
   "random/bit_gen_ref.h"
@@ -140,7 +216,6 @@
   "random/internal/fast_uniform_bits.h"
   "random/internal/generate_real.h"
   "random/internal/iostream_state_saver.h"
-  "random/internal/mock_helpers.h"
   "random/internal/nonsecure_base.h"
   "random/internal/pcg_engine.h"
   "random/internal/platform.h"
@@ -188,14 +263,51 @@
   "strings/charconv.h"
   "strings/cord.cc"
   "strings/cord.h"
+  "strings/cord_analysis.cc"
+  "strings/cord_analysis.h"
+  "strings/cord_buffer.cc"
+  "strings/cord_buffer.h"
   "strings/escaping.cc"
   "strings/escaping.h"
-  "strings/internal/cord_internal.h"
   "strings/internal/charconv_bigint.cc"
   "strings/internal/charconv_bigint.h"
   "strings/internal/charconv_parse.cc"
   "strings/internal/charconv_parse.h"
+  "strings/internal/cord_data_edge.h"
+  "strings/internal/cord_internal.cc"
+  "strings/internal/cord_internal.h"
+  "strings/internal/cord_rep_btree.cc"
+  "strings/internal/cord_rep_btree.h"
+  "strings/internal/cord_rep_btree_navigator.cc"
+  "strings/internal/cord_rep_btree_navigator.h"
+  "strings/internal/cord_rep_btree_reader.cc"
+  "strings/internal/cord_rep_btree_reader.h"
+  "strings/internal/cord_rep_crc.cc"
+  "strings/internal/cord_rep_crc.h"
+  "strings/internal/cord_rep_consume.h"
+  "strings/internal/cord_rep_consume.cc"
+  "strings/internal/cord_rep_flat.h"
+  "strings/internal/cord_rep_ring.cc"
+  "strings/internal/cord_rep_ring.h"
+  "strings/internal/cord_rep_ring_reader.h"
+  "strings/internal/cordz_functions.cc"
+  "strings/internal/cordz_functions.h"
+  "strings/internal/cordz_handle.cc"
+  "strings/internal/cordz_handle.h"
+  "strings/internal/cordz_info.cc"
+  "strings/internal/cordz_info.h"
+  "strings/internal/cordz_sample_token.cc"
+  "strings/internal/cordz_sample_token.h"
+  "strings/internal/cordz_statistics.h"
+  "strings/internal/cordz_update_scope.h"
+  "strings/internal/cordz_update_tracker.h"
+  "strings/internal/damerau_levenshtein_distance.h"
+  "strings/internal/damerau_levenshtein_distance.cc"
   "strings/internal/stl_type_traits.h"
+  "strings/internal/string_constant.h"
+  "strings/internal/stringify_sink.h"
+  "strings/internal/stringify_sink.cc"
+  "strings/internal/has_absl_stringify.h"
   "strings/match.cc"
   "strings/match.h"
   "strings/numbers.cc"
@@ -228,6 +340,7 @@
   "strings/internal/str_format/bind.cc"
   "strings/internal/str_format/bind.h"
   "strings/internal/str_format/checker.h"
+  "strings/internal/str_format/constexpr_parser.h"
   "strings/internal/str_format/extension.cc"
   "strings/internal/str_format/extension.h"
   "strings/internal/str_format/float_conversion.cc"
@@ -250,14 +363,27 @@
   "synchronization/notification.h"
   "synchronization/internal/create_thread_identity.cc"
   "synchronization/internal/create_thread_identity.h"
+  "synchronization/internal/futex.h"
+  "synchronization/internal/futex_waiter.h"
+  "synchronization/internal/futex_waiter.cc"
   "synchronization/internal/graphcycles.cc"
   "synchronization/internal/graphcycles.h"
   "synchronization/internal/kernel_timeout.h"
+  "synchronization/internal/kernel_timeout.cc"
   "synchronization/internal/per_thread_sem.cc"
   "synchronization/internal/per_thread_sem.h"
+  "synchronization/internal/pthread_waiter.h"
+  "synchronization/internal/pthread_waiter.cc"
+  "synchronization/internal/sem_waiter.h"
+  "synchronization/internal/sem_waiter.cc"
+  "synchronization/internal/stdcpp_waiter.h"
+  "synchronization/internal/stdcpp_waiter.cc"
   "synchronization/internal/thread_pool.h"
-  "synchronization/internal/waiter.cc"
   "synchronization/internal/waiter.h"
+  "synchronization/internal/waiter_base.h"
+  "synchronization/internal/waiter_base.cc"
+  "synchronization/internal/win32_waiter.h"
+  "synchronization/internal/win32_waiter.cc"
   "time/civil_time.cc"
   "time/civil_time.h"
   "time/clock.cc"
@@ -306,126 +432,217 @@
   "types/span.h"
   "types/internal/span.h"
   "types/variant.h"
+  "utility/internal/if_constexpr.h"
   "utility/utility.h"
+  "debugging/leak_check.cc"
 )
 
 set(ABSL_INTERNAL_DLL_TARGETS
-  "stacktrace"
-  "symbolize"
-  "examine_stack"
-  "failure_signal_handler"
-  "debugging_internal"
-  "demangle_internal"
-  "leak_check"
-  "leak_check_disable"
-  "stack_consumption"
-  "debugging"
-  "hash"
-  "spy_hash_state"
-  "city"
-  "memory"
-  "strings"
-  "strings_internal"
-  "cord"
-  "str_format"
-  "str_format_internal"
-  "pow10_helper"
-  "int128"
-  "numeric"
-  "utility"
-  "any"
-  "bad_any_cast"
-  "bad_any_cast_impl"
-  "span"
-  "optional"
-  "bad_optional_access"
-  "bad_variant_access"
-  "variant"
-  "compare"
+  "absl_check"
+  "absl_log"
   "algorithm"
   "algorithm_container"
-  "graphcycles_internal"
-  "kernel_timeout_internal"
-  "synchronization"
-  "thread_pool"
-  "bind_front"
-  "function_ref"
+  "any"
+  "any_invocable"
   "atomic_hook"
-  "log_severity"
-  "raw_logging_internal"
-  "spinlock_wait"
-  "config"
-  "dynamic_annotations"
-  "core_headers"
-  "malloc_internal"
-  "base_internal"
+  "bad_any_cast"
+  "bad_any_cast_impl"
+  "bad_optional_access"
+  "bad_variant_access"
   "base"
-  "throw_delegate"
-  "pretty_function"
-  "endian"
+  "base_internal"
+  "bind_front"
   "bits"
-  "exponential_biased"
-  "periodic_sampler"
-  "scoped_set_env"
-  "type_traits"
-  "meta"
-  "random_random"
-  "random_bit_gen_ref"
-  "random_distributions"
-  "random_seed_gen_exception"
-  "random_seed_sequences"
-  "random_internal_traits"
-  "random_internal_distribution_caller"
-  "random_internal_distributions"
-  "random_internal_fast_uniform_bits"
-  "random_internal_seed_material"
-  "random_internal_pool_urbg"
-  "random_internal_explicit_seed_seq"
-  "random_internal_sequence_urbg"
-  "random_internal_salted_seed_seq"
-  "random_internal_iostream_state_saver"
-  "random_internal_generate_real"
-  "random_internal_wide_multiply"
-  "random_internal_fastmath"
-  "random_internal_nonsecure_base"
-  "random_internal_pcg_engine"
-  "random_internal_randen_engine"
-  "random_internal_platform"
-  "random_internal_randen"
-  "random_internal_randen_slow"
-  "random_internal_randen_hwaes"
-  "random_internal_randen_hwaes_impl"
-  "random_internal_uniform_helper"
-  "status"
-  "time"
-  "civil_time"
-  "time_zone"
-  "container"
   "btree"
+  "check"
+  "city"
+  "civil_time"
+  "compare"
   "compressed_tuple"
-  "fixed_array"
-  "inlined_vector_internal"
-  "inlined_vector"
+  "config"
+  "container"
+  "container_common"
+  "container_memory"
+  "cord"
+  "cord_internal"
+  "cordz_functions"
+  "cordz_handle"
+  "cordz_info"
+  "cordz_sample_token"
+  "core_headers"
   "counting_allocator"
+  "crc_cord_state"
+  "crc_cpu_detect"
+  "crc_internal"
+  "crc32c"
+  "debugging"
+  "debugging_internal"
+  "demangle_internal"
+  "die_if_null"
+  "dynamic_annotations"
+  "endian"
+  "examine_stack"
+  "exponential_biased"
+  "failure_signal_handler"
+  "fixed_array"
   "flat_hash_map"
   "flat_hash_set"
-  "node_hash_map"
-  "node_hash_set"
-  "container_memory"
+  "function_ref"
+  "graphcycles_internal"
+  "hash"
   "hash_function_defaults"
   "hash_policy_traits"
-  "hashtablez_sampler"
   "hashtable_debug"
   "hashtable_debug_hooks"
-  "have_sse"
-  "node_hash_policy"
-  "raw_hash_map"
-  "container_common"
-  "raw_hash_set"
+  "hashtablez_sampler"
+  "inlined_vector"
+  "inlined_vector_internal"
+  "int128"
+  "kernel_timeout_internal"
   "layout"
+  "leak_check"
+  "log_internal_check_impl"
+  "log_internal_check_op"
+  "log_internal_conditions"
+  "log_internal_config"
+  "log_internal_format"
+  "log_internal_globals"
+  "log_internal_log_impl"
+  "log_internal_proto"
+  "log_internal_message"
+  "log_internal_log_sink_set"
+  "log_internal_nullguard"
+  "log_internal_nullstream"
+  "log_internal_strip"
+  "log_internal_voidify"
+  "log_internal_append_truncated"
+  "log_globals"
+  "log_initialize"
+  "log"
+  "log_entry"
+  "log_sink"
+  "log_sink_registry"
+  "log_streamer"
+  "log_internal_structured"
+  "log_severity"
+  "log_structured"
+  "low_level_hash"
+  "malloc_internal"
+  "memory"
+  "meta"
+  "node_hash_map"
+  "node_hash_set"
+  "node_slot_policy"
+  "non_temporal_arm_intrinsics"
+  "non_temporal_memcpy"
+  "numeric"
+  "optional"
+  "periodic_sampler"
+  "pow10_helper"
+  "pretty_function"
+  "random_bit_gen_ref"
+  "random_distributions"
+  "random_internal_distribution_caller"
+  "random_internal_distributions"
+  "random_internal_explicit_seed_seq"
+  "random_internal_fastmath"
+  "random_internal_fast_uniform_bits"
+  "random_internal_generate_real"
+  "random_internal_iostream_state_saver"
+  "random_internal_nonsecure_base"
+  "random_internal_pcg_engine"
+  "random_internal_platform"
+  "random_internal_pool_urbg"
+  "random_internal_randen"
+  "random_internal_randen_engine"
+  "random_internal_randen_hwaes"
+  "random_internal_randen_hwaes_impl"
+  "random_internal_randen_slow"
+  "random_internal_salted_seed_seq"
+  "random_internal_seed_material"
+  "random_internal_sequence_urbg"
+  "random_internal_traits"
+  "random_internal_uniform_helper"
+  "random_internal_wide_multiply"
+  "random_random"
+  "random_seed_gen_exception"
+  "random_seed_sequences"
+  "raw_hash_map"
+  "raw_hash_set"
+  "raw_logging_internal"
+  "sample_recorder"
+  "scoped_set_env"
+  "span"
+  "spinlock_wait"
+  "spy_hash_state"
+  "stack_consumption"
+  "stacktrace"
+  "status"
+  "statusor"
+  "str_format"
+  "str_format_internal"
+  "strerror"
+  "strings"
+  "strings_internal"
+  "symbolize"
+  "synchronization"
+  "thread_pool"
+  "throw_delegate"
+  "time"
+  "time_zone"
   "tracked"
+  "type_traits"
+  "utility"
+  "variant"
 )
 
+set(ABSL_INTERNAL_TEST_DLL_FILES
+  "hash/hash_testing.h"
+  "log/scoped_mock_log.cc"
+  "log/scoped_mock_log.h"
+  "random/internal/chi_square.cc"
+  "random/internal/chi_square.h"
+  "random/internal/distribution_test_util.cc"
+  "random/internal/distribution_test_util.h"
+  "random/internal/mock_helpers.h"
+  "random/internal/mock_overload_set.h"
+  "random/mocking_bit_gen.h"
+  "random/mock_distributions.h"
+  "strings/cordz_test_helpers.h"
+  "strings/cord_test_helpers.h"
+)
+
+set(ABSL_INTERNAL_TEST_DLL_TARGETS
+  "cord_test_helpers"
+  "cordz_test_helpers"
+  "hash_testing"
+  "random_mocking_bit_gen"
+  "random_internal_distribution_test_util"
+  "random_internal_mock_overload_set"
+  "scoped_mock_log"
+)
+
+include(CheckCXXSourceCompiles)
+
+check_cxx_source_compiles(
+  [==[
+#ifdef _MSC_VER
+#  if _MSVC_LANG < 201700L
+#    error "The compiler defaults or is configured for C++ < 17"
+#  endif
+#elif __cplusplus < 201700L
+#  error "The compiler defaults or is configured for C++ < 17"
+#endif
+int main() { return 0; }
+]==]
+  ABSL_INTERNAL_AT_LEAST_CXX17)
+
+if(ABSL_INTERNAL_AT_LEAST_CXX17)
+  set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17)
+else()
+  set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_14)
+endif()
+
 function(absl_internal_dll_contains)
   cmake_parse_arguments(ABSL_INTERNAL_DLL
     ""
@@ -448,6 +665,28 @@
   endif()
 endfunction()
 
+function(absl_internal_test_dll_contains)
+  cmake_parse_arguments(ABSL_INTERNAL_TEST_DLL
+    ""
+    "OUTPUT;TARGET"
+    ""
+    ${ARGN}
+  )
+
+  STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_TEST_DLL_TARGET})
+
+  list(FIND
+    ABSL_INTERNAL_TEST_DLL_TARGETS
+    "${_target}"
+    _index)
+
+  if (${_index} GREATER -1)
+    set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 1 PARENT_SCOPE)
+  else()
+    set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 0 PARENT_SCOPE)
+  endif()
+endfunction()
+
 function(absl_internal_dll_targets)
   cmake_parse_arguments(ABSL_INTERNAL_DLL
   ""
@@ -458,9 +697,12 @@
 
   set(_deps "")
   foreach(dep IN LISTS ABSL_INTERNAL_DLL_DEPS)
-    absl_internal_dll_contains(TARGET ${dep} OUTPUT _contains)
-    if (_contains)
+    absl_internal_dll_contains(TARGET ${dep} OUTPUT _dll_contains)
+    absl_internal_test_dll_contains(TARGET ${dep} OUTPUT _test_dll_contains)
+    if (_dll_contains)
       list(APPEND _deps abseil_dll)
+    elseif (_test_dll_contains)
+      list(APPEND _deps abseil_test_dll)
     else()
       list(APPEND _deps ${dep})
     endif()
@@ -472,41 +714,109 @@
 endfunction()
 
 function(absl_make_dll)
+  cmake_parse_arguments(ABSL_INTERNAL_MAKE_DLL
+  ""
+  "TEST"
+  ""
+  ${ARGN}
+  )
+
+  if (ABSL_INTERNAL_MAKE_DLL_TEST)
+    set(_dll "abseil_test_dll")
+    set(_dll_files ${ABSL_INTERNAL_TEST_DLL_FILES})
+    set(_dll_libs "abseil_dll" "GTest::gtest" "GTest::gmock")
+    set(_dll_compile_definitions "GTEST_LINKED_AS_SHARED_LIBRARY=1")
+    set(_dll_includes ${absl_gtest_src_dir}/googletest/include ${absl_gtest_src_dir}/googlemock/include)
+    set(_dll_consume "ABSL_CONSUME_TEST_DLL")
+    set(_dll_build "ABSL_BUILD_TEST_DLL")
+  else()
+    set(_dll "abseil_dll")
+    set(_dll_files ${ABSL_INTERNAL_DLL_FILES})
+    set(_dll_libs "")
+    set(_dll_compile_definitions "")
+    set(_dll_includes "")
+    set(_dll_consume "ABSL_CONSUME_DLL")
+    set(_dll_build "ABSL_BUILD_DLL")
+  endif()
+
   add_library(
-    abseil_dll
+    ${_dll}
     SHARED
-      "${ABSL_INTERNAL_DLL_FILES}"
+      ${_dll_files}
   )
   target_link_libraries(
-    abseil_dll
+    ${_dll}
     PRIVATE
+      ${_dll_libs}
       ${ABSL_DEFAULT_LINKOPTS}
   )
-  set_property(TARGET abseil_dll PROPERTY LINKER_LANGUAGE "CXX")
+  set_property(TARGET ${_dll} PROPERTY LINKER_LANGUAGE "CXX")
   target_include_directories(
-    abseil_dll
+    ${_dll}
     PUBLIC
       "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
-      $<INSTALL_INTERFACE:${ABSL_INSTALL_INCLUDEDIR}>
+      $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    PRIVATE
+      ${_dll_includes}
   )
 
   target_compile_options(
-    abseil_dll
+    ${_dll}
     PRIVATE
       ${ABSL_DEFAULT_COPTS}
   )
 
+  foreach(cflag ${ABSL_CC_LIB_COPTS})
+    if(${cflag} MATCHES "^(-Wno|/wd)")
+      # These flags are needed to suppress warnings that might fire in our headers.
+      set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+    elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
+      # Don't impose our warnings on others.
+    else()
+      set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+    endif()
+  endforeach()
+  string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}")
+
+  FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/${_dll}.pc" CONTENT "\
+prefix=${CMAKE_INSTALL_PREFIX}\n\
+exec_prefix=\${prefix}\n\
+libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\
+includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\
+\n\
+Name: ${_dll}\n\
+Description: Abseil DLL library\n\
+URL: https://abseil.io/\n\
+Version: ${absl_VERSION}\n\
+Libs: -L\${libdir} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-l${_dll}> ${PC_LINKOPTS}\n\
+Cflags: -I\${includedir}${PC_CFLAGS}\n")
+  INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/${_dll}.pc"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+
   target_compile_definitions(
-    abseil_dll
+    ${_dll}
+    PUBLIC
+      ${_dll_compile_definitions}
     PRIVATE
-      ABSL_BUILD_DLL
+      ${_dll_build}
       NOMINMAX
     INTERFACE
       ${ABSL_CC_LIB_DEFINES}
+      ${_dll_consume}
   )
-  install(TARGETS abseil_dll EXPORT ${PROJECT_NAME}Targets
-        RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR}
-        LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR}
-        ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR}
+
+  if(ABSL_PROPAGATE_CXX_STD)
+    # Abseil libraries require C++14 as the current minimum standard. When
+    # compiled with C++17 (either because it is the compiler's default or
+    # explicitly requested), then Abseil requires C++17.
+    target_compile_features(${_dll} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+  endif()
+
+  install(TARGETS ${_dll} EXPORT ${PROJECT_NAME}Targets
+        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
   )
+
+  add_library(absl::${_dll} ALIAS ${_dll})
 endfunction()
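
The reworked `absl_make_dll()` above now accepts an optional `TEST` argument and emits a pkg-config file per DLL. A minimal usage sketch (the `if()` wrapper and call sites are illustrative, not taken from this patch; the option names come from the change itself):

```cmake
# Hypothetical caller; ABSL_BUILD_DLL / ABSL_BUILD_TESTING / BUILD_TESTING are
# the existing cache options, the call sites themselves are illustrative.
if(ABSL_BUILD_DLL)
  absl_make_dll()            # builds abseil_dll from ABSL_INTERNAL_DLL_FILES
  if(BUILD_TESTING AND ABSL_BUILD_TESTING)
    absl_make_dll(TEST ON)   # builds abseil_test_dll, linked against abseil_dll and GTest
  endif()
endif()
```
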
diff --git a/abseil-cpp/CMake/AbseilHelpers.cmake b/abseil-cpp/CMake/AbseilHelpers.cmake
index 8b2925c..3bd33ce 100644
--- a/abseil-cpp/CMake/AbseilHelpers.cmake
+++ b/abseil-cpp/CMake/AbseilHelpers.cmake
@@ -17,7 +17,6 @@
 include(CMakeParseArguments)
 include(AbseilConfigureCopts)
 include(AbseilDll)
-include(AbseilInstallDirs)
 
 # The IDE folder for Abseil that will be used if Abseil is included in a CMake
 # project that sets
@@ -27,6 +26,12 @@
   set(ABSL_IDE_FOLDER Abseil)
 endif()
 
+if(ABSL_USE_SYSTEM_INCLUDES)
+  set(ABSL_INTERNAL_INCLUDE_WARNING_GUARD SYSTEM)
+else()
+  set(ABSL_INTERNAL_INCLUDE_WARNING_GUARD "")
+endif()
+
 # absl_cc_library()
 #
 # CMake function to imitate Bazel's cc_library rule.
@@ -41,7 +46,8 @@
 # LINKOPTS: List of link options
 # PUBLIC: Add this so that this library will be exported under absl::
 # Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal.
-# TESTONLY: When added, this target will only be built if user passes -DABSL_RUN_TESTS=ON to CMake.
+# TESTONLY: When added, this target will only be built if both
+#           BUILD_TESTING=ON and ABSL_BUILD_TESTING=ON.
 #
 # Note:
 # By default, absl_cc_library will always create a library named absl_${NAME},
@@ -83,7 +89,9 @@
     ${ARGN}
   )
 
-  if(ABSL_CC_LIB_TESTONLY AND NOT ABSL_RUN_TESTS)
+  if(ABSL_CC_LIB_TESTONLY AND
+      NOT ((BUILD_TESTING AND ABSL_BUILD_TESTING) OR
+        (ABSL_BUILD_TEST_HELPERS AND ABSL_CC_LIB_PUBLIC)))
     return()
   endif()
 
@@ -104,7 +112,7 @@
     endif()
   endforeach()
 
-  if("${ABSL_CC_SRCS}" STREQUAL "")
+  if(ABSL_CC_SRCS STREQUAL "")
     set(ABSL_CC_LIB_IS_INTERFACE 1)
   else()
     set(ABSL_CC_LIB_IS_INTERFACE 0)
@@ -122,8 +130,14 @@
   # 4. "static"  -- This target does not depend on the DLL and should be built
   #                 statically.
   if (${ABSL_BUILD_DLL})
-    absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll)
-    if (${_in_dll})
+    if(ABSL_ENABLE_INSTALL)
+      absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll)
+      absl_internal_test_dll_contains(TARGET ${_NAME} OUTPUT _in_test_dll)
+    else()
+      absl_internal_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_dll)
+      absl_internal_test_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_test_dll)
+    endif()
+    if (${_in_dll} OR ${_in_test_dll})
       # This target should be replaced by the DLL
       set(_build_type "dll")
       set(ABSL_CC_LIB_IS_INTERFACE 1)
@@ -137,8 +151,74 @@
     set(_build_type "static")
   endif()
 
+  # Generate a pkg-config file for every library:
+  if(ABSL_ENABLE_INSTALL)
+    if(absl_VERSION)
+      set(PC_VERSION "${absl_VERSION}")
+    else()
+      set(PC_VERSION "head")
+    endif()
+    if(NOT _build_type STREQUAL "dll")
+      set(LNK_LIB "${LNK_LIB} -labsl_${_NAME}")
+    endif()
+    foreach(dep ${ABSL_CC_LIB_DEPS})
+      if(${dep} MATCHES "^absl::(.*)")
+        # for DLL builds many libs are not created, but add
+        # the pkgconfigs nevertheless, pointing to the dll.
+        if(_build_type STREQUAL "dll")
+          # hide this MATCHES in an if-clause so it doesn't overwrite
+          # the CMAKE_MATCH_1 from (${dep} MATCHES "^absl::(.*)")
+          if(NOT PC_DEPS MATCHES "abseil_dll")
+            # Join deps with commas.
+            if(PC_DEPS)
+              set(PC_DEPS "${PC_DEPS},")
+            endif()
+            # don't duplicate dll-dep if it exists already
+            set(PC_DEPS "${PC_DEPS} abseil_dll = ${PC_VERSION}")
+            set(LNK_LIB "${LNK_LIB} -labseil_dll")
+          endif()
+        else()
+          # Join deps with commas.
+          if(PC_DEPS)
+            set(PC_DEPS "${PC_DEPS},")
+          endif()
+          set(PC_DEPS "${PC_DEPS} absl_${CMAKE_MATCH_1} = ${PC_VERSION}")
+        endif()
+      endif()
+    endforeach()
+    foreach(cflag ${ABSL_CC_LIB_COPTS})
+      if(${cflag} MATCHES "^(-Wno|/wd)")
+        # These flags are needed to suppress warnings that might fire in our headers.
+        set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+      elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
+        # Don't impose our warnings on others.
+      elseif(${cflag} MATCHES "^-m")
+        # Don't impose CPU instruction requirements on others, as
+        # the code performs feature detection at runtime.
+      else()
+        set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+      endif()
+    endforeach()
+    string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}")
+    FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\
+prefix=${CMAKE_INSTALL_PREFIX}\n\
+exec_prefix=\${prefix}\n\
+libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\
+includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\
+\n\
+Name: absl_${_NAME}\n\
+Description: Abseil ${_NAME} library\n\
+URL: https://abseil.io/\n\
+Version: ${PC_VERSION}\n\
+Requires:${PC_DEPS}\n\
+Libs: -L\${libdir} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:${LNK_LIB}> ${PC_LINKOPTS}\n\
+Cflags: -I\${includedir}${PC_CFLAGS}\n")
+    INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc"
+            DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+  endif()
+
   if(NOT ABSL_CC_LIB_IS_INTERFACE)
-    if(${_build_type} STREQUAL "dll_dep")
+    if(_build_type STREQUAL "dll_dep")
       # This target depends on the DLL. When adding dependencies to this target,
       # any depended-on-target which is contained inside the DLL is replaced
       # with a dependency on the DLL.
@@ -167,7 +247,7 @@
           "${_gtest_link_define}"
       )
 
-    elseif(${_build_type} STREQUAL "static" OR ${_build_type} STREQUAL "shared")
+    elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared")
       add_library(${_NAME} "")
       target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
       target_link_libraries(${_NAME}
@@ -187,10 +267,10 @@
     # unconditionally.
     set_property(TARGET ${_NAME} PROPERTY LINKER_LANGUAGE "CXX")
 
-    target_include_directories(${_NAME}
+    target_include_directories(${_NAME} ${ABSL_INTERNAL_INCLUDE_WARNING_GUARD}
       PUBLIC
         "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
-        $<INSTALL_INTERFACE:${ABSL_INSTALL_INCLUDEDIR}>
+        $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
     )
     target_compile_options(${_NAME}
       PRIVATE ${ABSL_CC_LIB_COPTS})
@@ -205,9 +285,12 @@
       set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/internal)
     endif()
 
-    # INTERFACE libraries can't have the CXX_STANDARD property set
-    set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
-    set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+    if(ABSL_PROPAGATE_CXX_STD)
+      # Abseil libraries require C++14 as the current minimum standard. When
+      # compiled with C++17 (either because it is the compiler's default or
+      # explicitly requested), then Abseil requires C++17.
+      target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+    endif()
 
     # When being installed, we lose the absl_ prefix.  We want to put it back
     # to have properly named lib files.  This is a no-op when we are not being
@@ -215,18 +298,19 @@
     if(ABSL_ENABLE_INSTALL)
       set_target_properties(${_NAME} PROPERTIES
         OUTPUT_NAME "absl_${_NAME}"
+        SOVERSION "2308.0.0"
       )
     endif()
   else()
     # Generating header-only library
     add_library(${_NAME} INTERFACE)
-    target_include_directories(${_NAME}
+    target_include_directories(${_NAME} ${ABSL_INTERNAL_INCLUDE_WARNING_GUARD}
       INTERFACE
         "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
-        $<INSTALL_INTERFACE:${ABSL_INSTALL_INCLUDEDIR}>
+        $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
       )
 
-    if (${_build_type} STREQUAL "dll")
+    if (_build_type STREQUAL "dll")
         set(ABSL_CC_LIB_DEPS abseil_dll)
     endif()
 
@@ -237,15 +321,20 @@
         ${ABSL_DEFAULT_LINKOPTS}
     )
     target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES})
+
+    if(ABSL_PROPAGATE_CXX_STD)
+      # Abseil libraries require C++14 as the current minimum standard.
+      # Top-level application CMake projects should ensure a consistent C++
+      # standard for all compiled sources by setting CMAKE_CXX_STANDARD.
+      target_compile_features(${_NAME} INTERFACE ${ABSL_INTERNAL_CXX_STD_FEATURE})
+    endif()
   endif()
 
-  # TODO currently we don't install googletest alongside abseil sources, so
-  # installed abseil can't be tested.
-  if(NOT ABSL_CC_LIB_TESTONLY AND ABSL_ENABLE_INSTALL)
+  if(ABSL_ENABLE_INSTALL)
     install(TARGETS ${_NAME} EXPORT ${PROJECT_NAME}Targets
-          RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR}
-          LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR}
-          ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR}
+          RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+          LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+          ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
     )
   endif()
 
@@ -286,11 +375,11 @@
 #     "awesome_test.cc"
 #   DEPS
 #     absl::awesome
-#     gmock
-#     gtest_main
+#     GTest::gmock
+#     GTest::gtest_main
 # )
 function(absl_cc_test)
-  if(NOT ABSL_RUN_TESTS)
+  if(NOT (BUILD_TESTING AND ABSL_BUILD_TESTING))
     return()
   endif()
 
@@ -307,7 +396,7 @@
   target_sources(${_NAME} PRIVATE ${ABSL_CC_TEST_SRCS})
   target_include_directories(${_NAME}
     PUBLIC ${ABSL_COMMON_INCLUDE_DIRS}
-    PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}
+    PRIVATE ${absl_gtest_src_dir}/googletest/include ${absl_gtest_src_dir}/googlemock/include
   )
 
   if (${ABSL_BUILD_DLL})
@@ -315,6 +404,7 @@
       PUBLIC
         ${ABSL_CC_TEST_DEFINES}
         ABSL_CONSUME_DLL
+        ABSL_CONSUME_TEST_DLL
         GTEST_LINKED_AS_SHARED_LIBRARY=1
     )
 
@@ -323,6 +413,10 @@
       DEPS ${ABSL_CC_TEST_DEPS}
       OUTPUT ABSL_CC_TEST_DEPS
     )
+    absl_internal_dll_targets(
+      DEPS ${ABSL_CC_TEST_LINKOPTS}
+      OUTPUT ABSL_CC_TEST_LINKOPTS
+    )
   else()
     target_compile_definitions(${_NAME}
       PUBLIC
@@ -340,16 +434,12 @@
   # Add all Abseil targets to a folder in the IDE for organization.
   set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test)
 
-  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
-  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+  if(ABSL_PROPAGATE_CXX_STD)
+    # Abseil libraries require C++14 as the current minimum standard.
+    # Top-level application CMake projects should ensure a consistent C++
+    # standard for all compiled sources by setting CMAKE_CXX_STANDARD.
+    target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+  endif()
 
   add_test(NAME ${_NAME} COMMAND ${_NAME})
 endfunction()
-
-
-function(check_target my_target)
-  if(NOT TARGET ${my_target})
-    message(FATAL_ERROR " ABSL: compiling absl requires a ${my_target} CMake target in your project,
-                   see CMake/README.md for more details")
-  endif(NOT TARGET ${my_target})
-endfunction()
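
With the gating above, `TESTONLY` libraries are built when both `BUILD_TESTING` and `ABSL_BUILD_TESTING` are ON, or when they are `PUBLIC` and `ABSL_BUILD_TEST_HELPERS` is ON. A hedged sketch of a call that exercises that path (the target name and header are hypothetical; the keywords are those of `absl_cc_library()`):

```cmake
# Illustrative absl_cc_library() invocation only; "example_matchers" and its
# header do not exist in the tree.
absl_cc_library(
  NAME
    example_matchers
  HDRS
    "internal/example_matchers.h"
  COPTS
    ${ABSL_DEFAULT_COPTS}
  DEPS
    absl::strings
    GTest::gmock
  TESTONLY
  PUBLIC
)
```
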
diff --git a/abseil-cpp/CMake/AbseilInstallDirs.cmake b/abseil-cpp/CMake/AbseilInstallDirs.cmake
deleted file mode 100644
index 6fc914b..0000000
--- a/abseil-cpp/CMake/AbseilInstallDirs.cmake
+++ /dev/null
@@ -1,20 +0,0 @@
-include(GNUInstallDirs)
-
-# absl_VERSION is only set if we are an LTS release being installed, in which
-# case it may be into a system directory and so we need to make subdirectories
-# for each installed version of Abseil.  This mechanism is implemented in
-# Abseil's internal Copybara (https://github.com/google/copybara) workflows and
-# isn't visible in the CMake buildsystem itself.
-
-if(absl_VERSION)
-  set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}")
-  set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}")
-  set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}")
-  set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}")
-  set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}")
-else()
-  set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}")
-  set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
-  set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}")
-  set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}")
-endif()
diff --git a/abseil-cpp/CMake/Googletest/CMakeLists.txt.in b/abseil-cpp/CMake/Googletest/CMakeLists.txt.in
index 994dac0..75691b1 100644
--- a/abseil-cpp/CMake/Googletest/CMakeLists.txt.in
+++ b/abseil-cpp/CMake/Googletest/CMakeLists.txt.in
@@ -1,26 +1,14 @@
-cmake_minimum_required(VERSION 2.8.2)
+cmake_minimum_required(VERSION 3.10)
 
 project(googletest-external NONE)
 
 include(ExternalProject)
-if(${ABSL_USE_GOOGLETEST_HEAD})
-  ExternalProject_Add(googletest
-    GIT_REPOSITORY    https://github.com/google/googletest.git
-    GIT_TAG           master
-    SOURCE_DIR        "${absl_gtest_src_dir}"
-    BINARY_DIR        "${absl_gtest_build_dir}"
-    CONFIGURE_COMMAND ""
-    BUILD_COMMAND     ""
-    INSTALL_COMMAND   ""
-    TEST_COMMAND      ""
-  )
-else()
-  ExternalProject_Add(googletest
-    SOURCE_DIR        "${absl_gtest_src_dir}"
-    BINARY_DIR        "${absl_gtest_build_dir}"
-    CONFIGURE_COMMAND ""
-    BUILD_COMMAND     ""
-    INSTALL_COMMAND   ""
-    TEST_COMMAND      ""
-  )
-endif()
\ No newline at end of file
+ExternalProject_Add(googletest
+  URL               "${absl_gtest_download_url}"  # May be empty
+  SOURCE_DIR        "${absl_gtest_src_dir}"
+  BINARY_DIR        "${absl_gtest_build_dir}"
+  CONFIGURE_COMMAND ""
+  BUILD_COMMAND     ""
+  INSTALL_COMMAND   ""
+  TEST_COMMAND      ""
+)
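
The template above is instantiated at configure time. For orientation, a hedged sketch of how a driver such as `DownloadGTest.cmake` (referenced from the top-level CMakeLists.txt) could consume it; all paths and the final `add_subdirectory()` step are assumptions, only the `absl_gtest_*` variable names come from this change:

```cmake
# Hypothetical driver sketch only; directory layout is assumed.
set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip")
set(absl_gtest_src_dir   "${CMAKE_BINARY_DIR}/googletest-src")
set(absl_gtest_build_dir "${CMAKE_BINARY_DIR}/googletest-build")

# Instantiate the template with the variables it references.
configure_file(CMake/Googletest/CMakeLists.txt.in
               "${CMAKE_BINARY_DIR}/googletest-external/CMakeLists.txt")

# Configure and build the helper project, which downloads GoogleTest at
# configure time via ExternalProject_Add().
execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
                WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/googletest-external")
execute_process(COMMAND ${CMAKE_COMMAND} --build .
                WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/googletest-external")

# Make the downloaded sources part of this build.
add_subdirectory("${absl_gtest_src_dir}" "${absl_gtest_build_dir}" EXCLUDE_FROM_ALL)
```
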
diff --git a/abseil-cpp/CMake/README.md b/abseil-cpp/CMake/README.md
index 8f73475..c7ddee6 100644
--- a/abseil-cpp/CMake/README.md
+++ b/abseil-cpp/CMake/README.md
@@ -20,8 +20,10 @@
 ### Step-by-Step Instructions
 
 1. If you want to build the Abseil tests, integrate the Abseil dependency
-[Google Test](https://github.com/google/googletest) into your CMake project. To disable Abseil tests, you have to pass
-`-DBUILD_TESTING=OFF` when configuring your project with CMake.
+[Google Test](https://github.com/google/googletest) into your CMake
+project. To disable Abseil tests, you have to pass either
+`-DBUILD_TESTING=OFF` or `-DABSL_BUILD_TESTING=OFF` when configuring your
+project with CMake.
 
 2. Download Abseil and copy it into a subdirectory in your CMake project or add
 Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your
@@ -34,15 +36,16 @@
 4. Add the **absl::** target you wish to use to the
 [`target_link_libraries()`](https://cmake.org/cmake/help/latest/command/target_link_libraries.html)
 section of your executable or of your library.<br>
-Here is a short CMakeLists.txt example of a project file using Abseil.
+Here is a short CMakeLists.txt example of an application project using Abseil.
 
 ```cmake
-cmake_minimum_required(VERSION 3.5)
-project(my_project)
+cmake_minimum_required(VERSION 3.10)
+project(my_app_project)
 
 # Pick the C++ standard to compile with.
-# Abseil currently supports C++11, C++14, and C++17.
-set(CMAKE_CXX_STANDARD 11)
+# Abseil currently supports C++14, C++17, and C++20.
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 add_subdirectory(abseil-cpp)
 
@@ -50,9 +53,48 @@
 target_link_libraries(my_exe absl::base absl::synchronization absl::strings)
 ```
 
+Note that if you are developing a library designed for use by other clients, you
+should instead leave `CMAKE_CXX_STANDARD` unset (or only set if being built as
+the current top-level CMake project) and configure the minimum required C++
+standard at the target level. If you require a later minimum C++ standard than
+Abseil does, it's a good idea to also enforce that `CMAKE_CXX_STANDARD` (which
+will control Abseil library targets) is set to at least that minimum. For
+example:
+
+```cmake
+cmake_minimum_required(VERSION 3.10)
+project(my_lib_project)
+
+# Leave C++ standard up to the root application, so set it only if this is the
+# current top-level CMake project.
+if(CMAKE_SOURCE_DIR STREQUAL my_lib_project_SOURCE_DIR)
+  set(CMAKE_CXX_STANDARD 17)
+  set(CMAKE_CXX_STANDARD_REQUIRED ON)
+endif()
+
+add_subdirectory(abseil-cpp)
+
+add_library(my_lib source.cpp)
+target_link_libraries(my_lib absl::base absl::synchronization absl::strings)
+
+# Enforce that my_lib requires C++17. Important to document for clients that they
+# must set CMAKE_CXX_STANDARD to 17 or higher for proper Abseil ABI compatibility
+# (since otherwise, Abseil library targets could be compiled with a lower C++
+# standard than my_lib).
+target_compile_features(my_lib PUBLIC cxx_std_17)
+if(CMAKE_CXX_STANDARD LESS 17)
+  message(FATAL_ERROR
+      "my_lib_project requires CMAKE_CXX_STANDARD >= 17 (got: ${CMAKE_CXX_STANDARD})")
+endif()
+```
+
+Then the top-level application project that uses your library is responsible for
+setting a consistent `CMAKE_CXX_STANDARD` that is sufficiently high.
+
 ### Running Abseil Tests with CMake
 
-Use the `-DABSL_RUN_TESTS=ON` flag to run Abseil tests.  Note that if the `-DBUILD_TESTING=OFF` flag is passed then Abseil tests will not be run.
+Use the `-DABSL_BUILD_TESTING=ON` flag to run Abseil tests.  Note that
+BUILD_TESTING must also be on (the default).
 
 You will need to provide Abseil with a Googletest dependency.  There are two
 options for how to do this:
@@ -70,7 +112,7 @@
 cd path/to/abseil-cpp
 mkdir build
 cd build
-cmake -DABSL_USE_GOOGLETEST_HEAD=ON -DABSL_RUN_TESTS=ON ..
+cmake -DABSL_BUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON ..
 make -j
 ctest
 ```
@@ -99,3 +141,48 @@
 absl::time
 absl::utility
 ```
+
+## Traditional CMake Set-Up
+
+For larger projects, it may make sense to use the traditional CMake set-up where you build and install projects separately.
+
+First, you'd need to build and install Google Test:
+```
+cmake -S /source/googletest -B /build/googletest -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/installation/dir -DBUILD_GMOCK=ON
+cmake --build /build/googletest --target install
+```
+
+Then you need to configure and build Abseil. Make sure you enable `ABSL_USE_EXTERNAL_GOOGLETEST` and `ABSL_FIND_GOOGLETEST`. You also need to enable `ABSL_ENABLE_INSTALL` so that you can install Abseil itself.
+```
+cmake -S /source/abseil-cpp -B /temporary/build/abseil-cpp -DCMAKE_PREFIX_PATH=/installation/dir -DCMAKE_INSTALL_PREFIX=/installation/dir -DABSL_ENABLE_INSTALL=ON -DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON
+cmake --build /temporary/build/abseil-cpp
+```
+
+(`CMAKE_PREFIX_PATH` is where you already have Google Test installed; `CMAKE_INSTALL_PREFIX` is where you want to have Abseil installed; they can be different.)
+
+Run the tests:
+```
+ctest --test-dir /temporary/build/abseil-cpp
+```
+
+And finally install:
+```
+cmake --build /temporary/build/abseil-cpp --target install
+```
+
+# CMake Option Synopsis
+
+## Enable Standard CMake Installation
+
+`-DABSL_ENABLE_INSTALL=ON`
+
+## Google Test Options
+
+`-DABSL_BUILD_TESTING=ON` must be set to enable testing
+
+- Have Abseil download and build Google Test for you: `-DABSL_USE_EXTERNAL_GOOGLETEST=OFF` (default)
+  - Download and build latest Google Test: `-DABSL_USE_GOOGLETEST_HEAD=ON`
+  - Download specific Google Test version (ZIP archive): `-DABSL_GOOGLETEST_DOWNLOAD_URL=https://.../version.zip`
+  - Use Google Test from specific local directory: `-DABSL_LOCAL_GOOGLETEST_DIR=/path/to/googletest`
+- Use Google Test included elsewhere in your project: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON`
+- Use standard CMake `find_package(GTest)` to find installed Google Test: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON`
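
To complement the install flow documented in the README hunks above, a minimal consumer of an installed Abseil might look like the following sketch (project and target names and `main.cc` are placeholders):

```cmake
cmake_minimum_required(VERSION 3.10)
project(hello_abseil CXX)

# Resolves abslConfig.cmake from CMAKE_PREFIX_PATH / the install prefix.
find_package(absl REQUIRED)

add_executable(hello main.cc)
target_link_libraries(hello PRIVATE absl::strings absl::str_format)
```
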
diff --git a/abseil-cpp/CMake/install_test_project/CMakeLists.txt b/abseil-cpp/CMake/install_test_project/CMakeLists.txt
index 06b797e..30c23b2 100644
--- a/abseil-cpp/CMake/install_test_project/CMakeLists.txt
+++ b/abseil-cpp/CMake/install_test_project/CMakeLists.txt
@@ -15,13 +15,11 @@
 
 # A simple CMakeLists.txt for testing cmake installation
 
-cmake_minimum_required(VERSION 3.5)
+cmake_minimum_required(VERSION 3.10)
 project(absl_cmake_testing CXX)
 
-set(CMAKE_CXX_STANDARD 11)
-
 add_executable(simple simple.cc)
 
 find_package(absl REQUIRED)
 
-target_link_libraries(simple absl::strings)
+target_link_libraries(simple absl::strings absl::config)
diff --git a/abseil-cpp/CMake/install_test_project/simple.cc b/abseil-cpp/CMake/install_test_project/simple.cc
index e9e3529..7daa7f0 100644
--- a/abseil-cpp/CMake/install_test_project/simple.cc
+++ b/abseil-cpp/CMake/install_test_project/simple.cc
@@ -14,8 +14,17 @@
 // limitations under the License.
 
 #include <iostream>
+#include "absl/base/config.h"
 #include "absl/strings/substitute.h"
 
+#if !defined(ABSL_LTS_RELEASE_VERSION) || ABSL_LTS_RELEASE_VERSION != 99998877
+#error ABSL_LTS_RELEASE_VERSION is not set correctly.
+#endif
+
+#if !defined(ABSL_LTS_RELEASE_PATCH_LEVEL) || ABSL_LTS_RELEASE_PATCH_LEVEL != 0
+#error ABSL_LTS_RELEASE_PATCH_LEVEL is not set correctly.
+#endif
+
 int main(int argc, char** argv) {
   for (int i = 0; i < argc; ++i) {
     std::cout << absl::Substitute("Arg $0: $1\n", i, argv[i]);
diff --git a/abseil-cpp/CMake/install_test_project/test.sh b/abseil-cpp/CMake/install_test_project/test.sh
index 99989b0..cc028ba 100755
--- a/abseil-cpp/CMake/install_test_project/test.sh
+++ b/abseil-cpp/CMake/install_test_project/test.sh
@@ -13,70 +13,60 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-# "Unit" and integration tests for Absl CMake installation
-
-# TODO(absl-team): This script isn't fully hermetic because
-# -DABSL_USE_GOOGLETEST_HEAD=ON means that this script isn't pinned to a fixed
-# version of GoogleTest. This means that an upstream change to GoogleTest could
-# break this test. Fix this by allowing this script to pin to a known-good
-# version of GoogleTest.
+#
+# Unit and integration tests for Abseil LTS CMake installation
 
 # Fail on any error. Treat unset variables an error. Print commands as executed.
 set -euox pipefail
 
-install_absl() {
-  pushd "${absl_build_dir}"
-  if [[ "${#}" -eq 1 ]]; then
-    cmake -DCMAKE_INSTALL_PREFIX="${1}" "${absl_dir}"
-  else
-    cmake "${absl_dir}"
-  fi
-  cmake --build . --target install -- -j
-  popd
-}
-
-uninstall_absl() {
-  xargs rm < "${absl_build_dir}"/install_manifest.txt
-  rm -rf "${absl_build_dir}"
-  mkdir -p "${absl_build_dir}"
-}
-
-lts_install=""
-
-while getopts ":l" lts; do
-  case "${lts}" in
-    l )
-      lts_install="true"
-      ;;
-  esac
-done
-
 absl_dir=/abseil-cpp
-absl_build_dir=/buildfs/absl-build
+absl_build_dir=/buildfs
+googletest_builddir=/googletest_builddir
 project_dir="${absl_dir}"/CMake/install_test_project
 project_build_dir=/buildfs/project-build
 
-mkdir -p "${absl_build_dir}"
+build_shared_libs="OFF"
+if [ "${LINK_TYPE:-}" = "DYNAMIC" ]; then
+  build_shared_libs="ON"
+fi
+
+# Build and install GoogleTest
+mkdir "${googletest_builddir}"
+pushd "${googletest_builddir}"
+curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${ABSL_GOOGLETEST_COMMIT}".zip
+unzip "${ABSL_GOOGLETEST_COMMIT}".zip
+pushd "googletest-${ABSL_GOOGLETEST_COMMIT}"
+mkdir build
+pushd build
+cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS="${build_shared_libs}" ..
+make -j $(nproc)
+make install
+ldconfig
+popd
+popd
+popd
+
+# Run the LTS transformations
+./create_lts.py 99998877
+
+# Build and install Abseil
+pushd "${absl_build_dir}"
+cmake "${absl_dir}" \
+  -DABSL_USE_EXTERNAL_GOOGLETEST=ON \
+  -DABSL_FIND_GOOGLETEST=ON  \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DABSL_BUILD_TESTING=ON \
+  -DBUILD_SHARED_LIBS="${build_shared_libs}"
+make -j $(nproc)
+ctest -j $(nproc) --output-on-failure
+make install
+ldconfig
+popd
+
+# Test the project against the installed Abseil
 mkdir -p "${project_build_dir}"
-
-if [[ "${lts_install}" ]]; then
-  install_dir="/usr/local"
-else
-  install_dir="${project_build_dir}"/install
-fi
-mkdir -p "${install_dir}"
-
-# Test build, install, and link against installed abseil
 pushd "${project_build_dir}"
-if [[ "${lts_install}" ]]; then
-  install_absl
-  cmake "${project_dir}"
-else
-  install_absl "${install_dir}"
-  cmake "${project_dir}" -DCMAKE_PREFIX_PATH="${install_dir}"
-fi
-
+cmake "${project_dir}"
 cmake --build . --target simple
 
 output="$(${project_build_dir}/simple "printme" 2>&1)"
@@ -88,57 +78,35 @@
 
 popd
 
-# Test that we haven't accidentally made absl::abslblah
-pushd "${install_dir}"
-
-# Starting in CMake 3.12 the default install dir is lib$bit_width
-if [[ -d lib64 ]]; then
-  libdir="lib64"
-elif [[ -d lib ]]; then
-  libdir="lib"
-else
-  echo "ls *, */*, */*/*:"
-  ls *
-  ls */*
-  ls */*/*
-  echo "unknown lib dir"
-fi
-
-if [[ "${lts_install}" ]]; then
-  # LTS versions append the date of the release to the subdir.
-  # 9999/99/99 is the dummy date used in the local_lts workflow.
-  absl_subdir="absl_99999999"
-else
-  absl_subdir="absl"
-fi
-
-if ! grep absl::strings "${libdir}/cmake/${absl_subdir}/abslTargets.cmake"; then
-  cat "${libdir}"/cmake/absl/abslTargets.cmake
+if ! grep absl::strings "/usr/local/lib/cmake/absl/abslTargets.cmake"; then
+  cat "/usr/local/lib/cmake/absl/abslTargets.cmake"
   echo "CMake targets named incorrectly"
   exit 1
 fi
 
-uninstall_absl
-popd
+pushd "${HOME}"
+cat > hello-abseil.cc << EOF
+#include <cstdlib>
 
-if [[ ! "${lts_install}" ]]; then
-  # Test that we warn if installed without a prefix or a system prefix
-  output="$(install_absl 2>&1)"
-  if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then
-    echo "Install without prefix didn't warn as expected. Output:"
-    echo "${output}"
-    exit 1
-  fi
-  uninstall_absl
+#include "absl/strings/str_format.h"
 
-  output="$(install_absl /usr 2>&1)"
-  if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then
-    echo "Install with /usr didn't warn as expected. Output:"
-    echo "${output}"
-    exit 1
-  fi
-  uninstall_absl
+int main(int argc, char **argv) {
+  absl::PrintF("Hello Abseil!\n");
+  return EXIT_SUCCESS;
+}
+EOF
+
+if [ "${LINK_TYPE:-}" != "DYNAMIC" ]; then
+  pc_args=($(pkg-config --cflags --libs --static absl_str_format))
+  g++ -static -o hello-abseil hello-abseil.cc "${pc_args[@]}"
+else
+  pc_args=($(pkg-config --cflags --libs absl_str_format))
+  g++ -o hello-abseil hello-abseil.cc "${pc_args[@]}"
 fi
+hello="$(./hello-abseil)"
+[[ "${hello}" == "Hello Abseil!" ]]
+
+popd
 
 echo "Install test complete!"
 exit 0
diff --git a/abseil-cpp/CMakeLists.txt b/abseil-cpp/CMakeLists.txt
index f0af6f6..eef6626 100644
--- a/abseil-cpp/CMakeLists.txt
+++ b/abseil-cpp/CMakeLists.txt
@@ -14,12 +14,9 @@
 # limitations under the License.
 #
 
-# Most widely used distributions have cmake 3.5 or greater available as of March
-# 2019.  A notable exception is RHEL-7 (CentOS7).  You can install a current
-# version of CMake by first installing Extra Packages for Enterprise Linux
-# (https://fedoraproject.org/wiki/EPEL#Extra_Packages_for_Enterprise_Linux_.28EPEL.29)
-# and then issuing `yum install cmake3` on the command line.
-cmake_minimum_required(VERSION 3.5)
+# https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md
+# As of 2022-09-06, CMake 3.10 is the minimum supported version.
+cmake_minimum_required(VERSION 3.10)
 
 # Compiler id for Apple Clang is now AppleClang.
 if (POLICY CMP0025)
@@ -41,7 +38,23 @@
   cmake_policy(SET CMP0077 NEW)
 endif (POLICY CMP0077)
 
-project(absl CXX)
+# Allow the user to specify the MSVC runtime
+if (POLICY CMP0091)
+  cmake_policy(SET CMP0091 NEW)
+endif (POLICY CMP0091)
+
+# try_compile() honors the CMAKE_CXX_STANDARD value
+if (POLICY CMP0067)
+  cmake_policy(SET CMP0067 NEW)
+endif (POLICY CMP0067)
+
+# Allow the user to specify the CMAKE_MSVC_DEBUG_INFORMATION_FORMAT
+if (POLICY CMP0141)
+  cmake_policy(SET CMP0141 NEW)
+endif (POLICY CMP0141)
+
+project(absl LANGUAGES CXX VERSION 20230802)
+include(CTest)
 
 # Output directory is correct by default for most build setups. However, when
 # building Abseil as a DLL, it is important to have the DLL in the same
@@ -51,19 +64,30 @@
 
 # when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp))
 # in the source tree of a project that uses it, install rules are disabled.
-if(NOT "^${CMAKE_SOURCE_DIR}$" STREQUAL "^${PROJECT_SOURCE_DIR}$")
+if(NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
   option(ABSL_ENABLE_INSTALL "Enable install rule" OFF)
 else()
   option(ABSL_ENABLE_INSTALL "Enable install rule" ON)
 endif()
 
+option(ABSL_PROPAGATE_CXX_STD
+  "Use CMake C++ standard meta features (e.g. cxx_std_14) that propagate to targets that link to Abseil"
+  OFF)  # TODO: Default to ON for CMake 3.8 and greater.
+if(NOT ABSL_PROPAGATE_CXX_STD)
+  message(WARNING "A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON for CMake 3.8 and up. We recommend enabling this option to ensure your project still builds correctly.")
+endif()
+
+option(ABSL_USE_SYSTEM_INCLUDES
+  "Silence warnings in Abseil headers by marking them as SYSTEM includes"
+  OFF)
+
 list(APPEND CMAKE_MODULE_PATH
   ${CMAKE_CURRENT_LIST_DIR}/CMake
   ${CMAKE_CURRENT_LIST_DIR}/absl/copts
 )
 
-include(AbseilInstallDirs)
 include(CMakePackageConfigHelpers)
+include(GNUInstallDirs)
 include(AbseilDll)
 include(AbseilHelpers)
 
@@ -92,64 +116,86 @@
 ## pthread
 find_package(Threads REQUIRED)
 
+include(CMakeDependentOption)
+
+option(ABSL_BUILD_TESTING
+  "If ON, Abseil will build all of Abseil's own tests." OFF)
+
+option(ABSL_BUILD_TEST_HELPERS
+  "If ON, Abseil will build libraries that you can use to write tests against Abseil code. This option requires that Abseil is configured to use GoogleTest."
+  OFF)
+
 option(ABSL_USE_EXTERNAL_GOOGLETEST
-  "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF)
+  "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subdirectory." OFF)
+
+cmake_dependent_option(ABSL_FIND_GOOGLETEST
+  "If ON, Abseil will use find_package(GTest) rather than assuming that GoogleTest is already provided by the including project."
+  ON
+  "ABSL_USE_EXTERNAL_GOOGLETEST"
+  OFF)
 
 
 option(ABSL_USE_GOOGLETEST_HEAD
-  "If ON, abseil will download HEAD from googletest at config time." OFF)
+  "If ON, abseil will download HEAD from GoogleTest at config time." OFF)
+
+set(ABSL_GOOGLETEST_DOWNLOAD_URL "" CACHE STRING "If set, download GoogleTest from this URL")
 
 set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH
-  "If ABSL_USE_GOOGLETEST_HEAD is OFF, specifies the directory of a local googletest checkout."
+  "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout."
   )
 
-option(ABSL_RUN_TESTS "If ON, Abseil tests will be run." OFF)
-
-if(${ABSL_RUN_TESTS})
-  # enable CTest.  This will set BUILD_TESTING to ON unless otherwise specified
-  # on the command line
-  include(CTest)
-
-  ## check targets
-  if (NOT ABSL_USE_EXTERNAL_GOOGLETEST)
+if((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
+  if (ABSL_USE_EXTERNAL_GOOGLETEST)
+    if (ABSL_FIND_GOOGLETEST)
+      find_package(GTest REQUIRED)
+    elseif(NOT TARGET GTest::gtest)
+      if(TARGET gtest)
+        # When Google Test is included directly rather than through find_package, the aliases are missing.
+        add_library(GTest::gtest ALIAS gtest)
+        add_library(GTest::gtest_main ALIAS gtest_main)
+        add_library(GTest::gmock ALIAS gmock)
+        add_library(GTest::gmock_main ALIAS gmock_main)
+      else()
+        message(FATAL_ERROR "ABSL_USE_EXTERNAL_GOOGLETEST is ON and ABSL_FIND_GOOGLETEST is OFF, which means that the top-level project must build the Google Test project. However, the target gtest was not found.")
+      endif()
+    endif()
+  else()
     set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build)
-    if(${ABSL_USE_GOOGLETEST_HEAD})
+    if(ABSL_USE_GOOGLETEST_HEAD AND ABSL_GOOGLETEST_DOWNLOAD_URL)
+      message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL")
+    endif()
+    if(ABSL_USE_GOOGLETEST_HEAD)
+      set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip")
+    elseif(ABSL_GOOGLETEST_DOWNLOAD_URL)
+      set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL})
+    endif()
+    if(absl_gtest_download_url)
       set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src)
     else()
       set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR})
     endif()
     include(CMake/Googletest/DownloadGTest.cmake)
   endif()
-
-  check_target(gtest)
-  check_target(gtest_main)
-  check_target(gmock)
-
-  list(APPEND ABSL_TEST_COMMON_LIBRARIES
-    gtest_main
-    gtest
-    gmock
-    ${CMAKE_THREAD_LIBS_INIT}
-  )
 endif()
 
 add_subdirectory(absl)
 
 if(ABSL_ENABLE_INSTALL)
+  
 
   # install as a subdirectory only
   install(EXPORT ${PROJECT_NAME}Targets
     NAMESPACE absl::
-    DESTINATION "${ABSL_INSTALL_CONFIGDIR}"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )
 
   configure_package_config_file(
     CMake/abslConfig.cmake.in
     "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
-    INSTALL_DESTINATION "${ABSL_INSTALL_CONFIGDIR}"
+    INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )
   install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
-    DESTINATION "${ABSL_INSTALL_CONFIGDIR}"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )
 
   # Abseil only has a version in LTS releases.  This mechanism is accomplished
@@ -162,16 +208,37 @@
     )
 
     install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
-      DESTINATION ${ABSL_INSTALL_CONFIGDIR}
+      DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
     )
   endif()  # absl_VERSION
 
   install(DIRECTORY absl
-    DESTINATION ${ABSL_INSTALL_INCLUDEDIR}
+    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
     FILES_MATCHING
       PATTERN "*.inc"
       PATTERN "*.h"
       PATTERN "copts" EXCLUDE
       PATTERN "testdata" EXCLUDE
     )
+
+  file(READ "absl/base/options.h" ABSL_INTERNAL_OPTIONS_H_CONTENTS)
+  if (ABSL_INTERNAL_AT_LEAST_CXX17)
+    string(REGEX REPLACE
+      "#define ABSL_OPTION_USE_STD_([^ ]*) 2"
+      "#define ABSL_OPTION_USE_STD_\\1 1"
+      ABSL_INTERNAL_OPTIONS_H_PINNED
+      "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
+  else()
+    string(REGEX REPLACE
+      "#define ABSL_OPTION_USE_STD_([^ ]*) 2"
+      "#define ABSL_OPTION_USE_STD_\\1 0"
+      ABSL_INTERNAL_OPTIONS_H_PINNED
+      "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
+  endif()
+  file(WRITE "${CMAKE_BINARY_DIR}/options-pinned.h" "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
+
+  install(FILES "${CMAKE_BINARY_DIR}/options-pinned.h"
+         DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/absl/base
+         RENAME "options.h")
+
 endif()  # ABSL_ENABLE_INSTALL
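
For illustration, a minimal sketch of what the regex pinning above does to the installed header, assuming ABSL_OPTION_USE_STD_STRING_VIEW as one of the ABSL_OPTION_USE_STD_* macros the pattern matches:

    // Shipped absl/base/options.h: value 2 means "decide at compile time".
    #define ABSL_OPTION_USE_STD_STRING_VIEW 2

    // Installed copy (generated as options-pinned.h, renamed to options.h)
    // when the install was configured with a C++17-or-later toolchain:
    #define ABSL_OPTION_USE_STD_STRING_VIEW 1

    // ...and when it was not:
    #define ABSL_OPTION_USE_STD_STRING_VIEW 0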
diff --git a/abseil-cpp/CONTRIBUTING.md b/abseil-cpp/CONTRIBUTING.md
index 9dadae9..a87254c 100644
--- a/abseil-cpp/CONTRIBUTING.md
+++ b/abseil-cpp/CONTRIBUTING.md
@@ -75,9 +75,9 @@
 
 ## Guidelines for Pull Requests
 
-*   If you are a Googler, it is preferable to first create an internal CL and
-    have it reviewed and submitted. The code propagation process will deliver
-    the change to GitHub.
+*   If you are a Googler, it is required that you send us a Piper CL instead of
+    using the GitHub pull-request process. The code propagation process will
+    deliver the change to GitHub.
 
 *   Create **small PRs** that are narrowly focused on **addressing a single
     concern**. We often receive PRs that are trying to fix several things at a
diff --git a/abseil-cpp/FAQ.md b/abseil-cpp/FAQ.md
index 78028fc..fbd92ce 100644
--- a/abseil-cpp/FAQ.md
+++ b/abseil-cpp/FAQ.md
@@ -27,7 +27,10 @@
   file](https://docs.bazel.build/versions/master/guide.html#bazelrc)
 
 If you are using CMake as the build system, you'll need to add a line like
-`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. See the
+`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. If you
+are developing a library designed to be used by other clients, you should
+instead leave `CMAKE_CXX_STANDARD` unset and configure the minimum C++ standard
+required by each of your library targets via `target_compile_features`. See the
 [CMake build
 instructions](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md)
 for more information.
diff --git a/abseil-cpp/LTS.md b/abseil-cpp/LTS.md
deleted file mode 100644
index ade8b17..0000000
--- a/abseil-cpp/LTS.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Long Term Support (LTS) Branches
-
-This repository contains periodic snapshots of the Abseil codebase that are
-Long Term Support (LTS) branches. An LTS branch allows you to use a known
-version of Abseil without interfering with other projects which may also, in
-turn, use Abseil. (For more information about our releases, see the
-[Abseil Release Management](https://abseil.io/about/releases) guide.)
-
-## LTS Branches
-
-The following lists LTS branches and the dates on which they have been released:
-
-* [LTS Branch December 18, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_12_18/)
-* [LTS Branch June 20, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_06_20/)
-* [LTS Branch August 8, 2019](https://github.com/abseil/abseil-cpp/tree/lts_2019_08_08/)
-* [LTS Branch February 25, 2020](https://github.com/abseil/abseil-cpp/tree/lts_2020_02_25/)
diff --git a/abseil-cpp/METADATA b/abseil-cpp/METADATA
deleted file mode 100644
index 5ebc613..0000000
--- a/abseil-cpp/METADATA
+++ /dev/null
@@ -1,19 +0,0 @@
-name: "extern/abseil-cpp"
-description:
-    "An open-source collection of C++ code designed to augment the C++ standard "
-    "library"
-
-third_party {
-  url {
-    type: HOMEPAGE
-    value: "https://abseil.io"
-  }
-  url {
-    type: GIT
-    value: "https://github.com/abseil/abseil-cpp.git"
-  }
-  license_type: NOTICE
-  version: "20200923.2"
-  last_upgrade_date { year: 2020 month: 10 day: 28 }
-}
-
diff --git a/abseil-cpp/README.md b/abseil-cpp/README.md
index 85de569..f834fcd 100644
--- a/abseil-cpp/README.md
+++ b/abseil-cpp/README.md
@@ -1,7 +1,7 @@
 # Abseil - C++ Common Libraries
 
 The repository contains the Abseil C++ library code. Abseil is an open-source
-collection of C++ code (compliant to C++11) designed to augment the C++
+collection of C++ code (compliant to C++14) designed to augment the C++
 standard library.
 
 ## Table of Contents
@@ -9,7 +9,9 @@
 - [About Abseil](#about)
 - [Quickstart](#quickstart)
 - [Building Abseil](#build)
+- [Support](#support)
 - [Codemap](#codemap)
+- [Releases](#releases)
 - [License](#license)
 - [Links](#links)
 
@@ -42,46 +44,78 @@
 <a name="build"></a>
 ## Building Abseil
 
-[Bazel](https://bazel.build) is the official build system for Abseil,
-which is supported on most major platforms (Linux, Windows, macOS, for example)
-and compilers. See the [quickstart](https://abseil.io/docs/cpp/quickstart) for
-more information on building Abseil using the Bazel build system.
+[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official
+build systems for Abseil.
+See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information
+on building Abseil using the Bazel build system.
+If you require CMake support, please check the [CMake build
+instructions](CMake/README.md) and [CMake
+Quickstart](https://abseil.io/docs/cpp/quickstart-cmake).
 
-<a name="cmake"></a>
-If you require CMake support, please check the
-[CMake build instructions](CMake/README.md).
+<a name="support"></a>
+## Support
 
+Abseil follows Google's [Foundational C++ Support
+Policy](https://opensource.google/documentation/policies/cplusplus-support). See
+[this
+table](https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md)
+for a list of currently supported versions of compilers, platforms, and build
+tools.
+
+<a name="codemap"></a>
 ## Codemap
 
 Abseil contains the following C++ library components:
 
-* [`base`](absl/base/) Abseil Fundamentals
+* [`base`](absl/base/)
   <br /> The `base` library contains initialization code and other code which
   all other Abseil code depends on. Code within `base` may not depend on any
   other code (other than the C++ standard library).
 * [`algorithm`](absl/algorithm/)
   <br /> The `algorithm` library contains additions to the C++ `<algorithm>`
   library and container-based versions of such algorithms.
+* [`cleanup`](absl/cleanup/)
+  <br /> The `cleanup` library contains the control-flow-construct-like type
+  `absl::Cleanup` which is used for executing a callback on scope exit.
 * [`container`](absl/container/)
   <br /> The `container` library contains additional STL-style containers,
   including Abseil's unordered "Swiss table" containers.
+* [`crc`](absl/crc/)
+  <br /> The `crc` library contains code for computing error-detecting cyclic
+  redundancy checks on data.
 * [`debugging`](absl/debugging/)
   <br /> The `debugging` library contains code useful for enabling leak
   checks, and stacktrace and symbolization utilities.
+* [`flags`](absl/flags/)
+  <br /> The `flags` library contains code for handling command line flags for
+  libraries and binaries built with Abseil.
 * [`hash`](absl/hash/)
   <br /> The `hash` library contains the hashing framework and default hash
   functor implementations for hashable types in Abseil.
+* [`log`](absl/log/)
+  <br /> The `log` library contains `LOG` and `CHECK` macros and facilities
+  for writing logged messages out to disk, `stderr`, or user-extensible
+  destinations.
 * [`memory`](absl/memory/)
-  <br /> The `memory` library contains C++11-compatible versions of
-  `std::make_unique()` and related memory management facilities.
+  <br /> The `memory` library contains memory management facilities that augment
+  C++'s `<memory>` library.
 * [`meta`](absl/meta/)
-  <br /> The `meta` library contains C++11-compatible versions of type checks
+  <br /> The `meta` library contains compatible versions of type checks
   available within C++14 and C++17 versions of the C++ `<type_traits>` library.
 * [`numeric`](absl/numeric/)
-  <br /> The `numeric` library contains C++11-compatible 128-bit integers.
+  <br /> The `numeric` library contains 128-bit integer types as well as
+  implementations of C++20's bitwise math functions.
+* [`profiling`](absl/profiling/)
+  <br /> The `profiling` library contains utility code for profiling C++
+  entities.  It is currently a private dependency of other Abseil libraries.
+* [`random`](absl/random/)
+  <br /> The `random` library contains functions for generating pseudorandom
+  values.
+* [`status`](absl/status/)
+  <br /> The `status` library contains abstractions for error handling,
+  specifically `absl::Status` and `absl::StatusOr<T>`.
 * [`strings`](absl/strings/)
   <br /> The `strings` library contains a variety of strings routines and
-  utilities, including a C++11-compatible version of the C++17
+  utilities, including a C++14-compatible version of the C++17
   `std::string_view` type.
 * [`synchronization`](absl/synchronization/)
   <br /> The `synchronization` library contains concurrency primitives (Abseil's
@@ -93,15 +127,27 @@
   time zones.
 * [`types`](absl/types/)
   <br /> The `types` library contains non-container utility types, like a
-  C++11-compatible version of the C++17 `std::optional` type.
+  C++14-compatible version of the C++17 `std::optional` type.
 * [`utility`](absl/utility/)
   <br /> The `utility` library contains utility and helper code.
 
+<a name="releases"></a>
+## Releases
+
+Abseil recommends users "live-at-head" (update to the latest commit from the
+master branch as often as possible). However, we realize this philosophy doesn't
+work for every project, so we also provide [Long Term Support
+Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport
+fixes for severe bugs. See our [release
+management](https://abseil.io/about/releases) document for more details.
+
+<a name="license"></a>
 ## License
 
 The Abseil C++ library is licensed under the terms of the Apache
 license. See [LICENSE](LICENSE) for more information.
 
+<a name="links"></a>
 ## Links
 
 For more information about Abseil:
diff --git a/abseil-cpp/UPGRADES.md b/abseil-cpp/UPGRADES.md
index 35599d0..3cac141 100644
--- a/abseil-cpp/UPGRADES.md
+++ b/abseil-cpp/UPGRADES.md
@@ -1,6 +1,6 @@
 # C++ Upgrade Tools
 
-Abseil may occassionally release API-breaking changes. As noted in our
+Abseil may occasionally release API-breaking changes. As noted in our
 [Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
 to do the work of effecting such API-breaking changes, when absolutely
 necessary.
diff --git a/abseil-cpp/WORKSPACE b/abseil-cpp/WORKSPACE
index 0b53356..fdb615f 100644
--- a/abseil-cpp/WORKSPACE
+++ b/abseil-cpp/WORKSPACE
@@ -15,30 +15,46 @@
 #
 
 workspace(name = "com_google_absl")
+
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 
 # GoogleTest/GoogleMock framework. Used by most unit-tests.
 http_archive(
-    name = "com_google_googletest",
-    urls = ["https://github.com/google/googletest/archive/8567b09290fe402cf01923e2131c5635b8ed851b.zip"],  # 2020-06-12T22:24:28Z
-    strip_prefix = "googletest-8567b09290fe402cf01923e2131c5635b8ed851b",
-    sha256 = "9a8a166eb6a56c7b3d7b19dc2c946fe4778fd6f21c7a12368ad3b836d8f1be48",
+    name = "com_google_googletest",  # 2023-08-02T16:45:10Z
+    sha256 = "1f357c27ca988c3f7c6b4bf68a9395005ac6761f034046e9dde0896e3aba00e4",
+    strip_prefix = "googletest-1.14.0",
+    # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh.
+    urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.zip"],
+)
+
+# RE2 (the regular expression library used by GoogleTest)
+http_archive(
+    name = "com_googlesource_code_re2",  # 2023-03-17T11:36:51Z
+    sha256 = "cb8b5312a65f2598954545a76e8bce913f35fbb3a21a5c88797a4448e9f9b9d9",
+    strip_prefix = "re2-578843a516fd1da7084ae46209a75f3613b6065e",
+    urls = ["https://github.com/google/re2/archive/578843a516fd1da7084ae46209a75f3613b6065e.zip"],
 )
 
 # Google benchmark.
 http_archive(
-    name = "com_github_google_benchmark",
-    urls = ["https://github.com/google/benchmark/archive/16703ff83c1ae6d53e5155df3bb3ab0bc96083be.zip"],
-    strip_prefix = "benchmark-16703ff83c1ae6d53e5155df3bb3ab0bc96083be",
-    sha256 = "59f918c8ccd4d74b6ac43484467b500f1d64b40cc1010daa055375b322a43ba3",
+    name = "com_github_google_benchmark",  # 2023-08-01T07:47:09Z
+    sha256 = "db1e39ee71dc38aa7e57ed007f2c8b3bb59e13656435974781a9dc0617d75cc9",
+    strip_prefix = "benchmark-02a354f3f323ae8256948e1dc77ddcb1dfc297da",
+    urls = ["https://github.com/google/benchmark/archive/02a354f3f323ae8256948e1dc77ddcb1dfc297da.zip"],
 )
 
-# C++ rules for Bazel.
+# Bazel Skylib.
 http_archive(
-    name = "rules_cc",
-    sha256 = "9a446e9dd9c1bb180c86977a8dc1e9e659550ae732ae58bd2e8fd51e15b2c91d",
-    strip_prefix = "rules_cc-262ebec3c2296296526740db4aefce68c80de7fa",
-    urls = [
-        "https://github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip",
-    ],
+    name = "bazel_skylib",  # 2023-05-31T19:24:07Z
+    sha256 = "08c0386f45821ce246bbbf77503c973246ed6ee5c3463e41efc197fa9bc3a7f4",
+    strip_prefix = "bazel-skylib-288731ef9f7f688932bd50e704a91a45ec185f9b",
+    urls = ["https://github.com/bazelbuild/bazel-skylib/archive/288731ef9f7f688932bd50e704a91a45ec185f9b.zip"],
+)
+
+# Bazel platform rules.
+http_archive(
+    name = "platforms",  # 2023-07-28T19:44:27Z
+    sha256 = "40eb313613ff00a5c03eed20aba58890046f4d38dec7344f00bb9a8867853526",
+    strip_prefix = "platforms-4ad40ef271da8176d4fc0194d2089b8a76e19d7b",
+    urls = ["https://github.com/bazelbuild/platforms/archive/4ad40ef271da8176d4fc0194d2089b8a76e19d7b.zip"],
 )
diff --git a/abseil-cpp/absl/BUILD.bazel b/abseil-cpp/absl/BUILD.bazel
index 0b772df..253c0ae 100644
--- a/abseil-cpp/absl/BUILD.bazel
+++ b/abseil-cpp/absl/BUILD.bazel
@@ -14,44 +14,75 @@
 # limitations under the License.
 #
 
-load(
-    ":compiler_config_setting.bzl",
-    "create_llvm_config",
-)
+load("@bazel_skylib//lib:selects.bzl", "selects")
 
 package(default_visibility = ["//visibility:public"])
 
 licenses(["notice"])
 
-create_llvm_config(
-    name = "llvm_compiler",
+config_setting(
+    name = "clang_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "clang",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "gcc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "gcc",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "mingw_unspecified_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "mingw",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "mingw-gcc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "mingw-gcc",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "msvc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "msvc-cl",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "clang-cl_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "clang-cl",
+    },
     visibility = [":__subpackages__"],
 )
 
 config_setting(
     name = "osx",
     constraint_values = [
-        "@bazel_tools//platforms:osx",
+        "@platforms//os:osx",
     ],
 )
 
 config_setting(
     name = "ios",
     constraint_values = [
-        "@bazel_tools//platforms:ios",
+        "@platforms//os:ios",
     ],
 )
 
 config_setting(
-    name = "windows",
-    constraint_values = [
-        "@bazel_tools//platforms:x86_64",
-        "@bazel_tools//platforms:windows",
-    ],
-    visibility = [":__subpackages__"],
-)
-
-config_setting(
     name = "ppc",
     values = {
         "cpu": "ppc",
@@ -60,9 +91,61 @@
 )
 
 config_setting(
-    name = "wasm",
+    name = "cpu_wasm",
+    values = {
+        "cpu": "wasm",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "cpu_wasm32",
     values = {
         "cpu": "wasm32",
     },
     visibility = [":__subpackages__"],
 )
+
+config_setting(
+    name = "platforms_wasm32",
+    constraint_values = [
+        "@platforms//cpu:wasm32",
+    ],
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "platforms_wasm64",
+    constraint_values = [
+        "@platforms//cpu:wasm64",
+    ],
+    visibility = [":__subpackages__"],
+)
+
+selects.config_setting_group(
+    name = "wasm",
+    match_any = [
+        ":cpu_wasm",
+        ":cpu_wasm32",
+        ":platforms_wasm32",
+        ":platforms_wasm64",
+    ],
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "fuchsia",
+    values = {
+        "cpu": "fuchsia",
+    },
+    visibility = [":__subpackages__"],
+)
+
+selects.config_setting_group(
+    name = "mingw_compiler",
+    match_any = [
+        ":mingw_unspecified_compiler",
+        ":mingw-gcc_compiler",
+    ],
+    visibility = [":__subpackages__"],
+)
diff --git a/abseil-cpp/absl/CMakeLists.txt b/abseil-cpp/absl/CMakeLists.txt
index fbfa782..3a7c12f 100644
--- a/abseil-cpp/absl/CMakeLists.txt
+++ b/abseil-cpp/absl/CMakeLists.txt
@@ -16,14 +16,18 @@
 
 add_subdirectory(base)
 add_subdirectory(algorithm)
+add_subdirectory(cleanup)
 add_subdirectory(container)
+add_subdirectory(crc)
 add_subdirectory(debugging)
 add_subdirectory(flags)
 add_subdirectory(functional)
 add_subdirectory(hash)
+add_subdirectory(log)
 add_subdirectory(memory)
 add_subdirectory(meta)
 add_subdirectory(numeric)
+add_subdirectory(profiling)
 add_subdirectory(random)
 add_subdirectory(status)
 add_subdirectory(strings)
@@ -34,4 +38,7 @@
 
 if (${ABSL_BUILD_DLL})
   absl_make_dll()
+  if (${ABSL_BUILD_TEST_HELPERS})
+    absl_make_dll(TEST ON)
+  endif()
 endif()
diff --git a/abseil-cpp/absl/abseil.podspec.gen.py b/abseil-cpp/absl/abseil.podspec.gen.py
index 6aefb79..6375298 100755
--- a/abseil-cpp/absl/abseil.podspec.gen.py
+++ b/abseil-cpp/absl/abseil.podspec.gen.py
@@ -40,8 +40,8 @@
     'USE_HEADERMAP' => 'NO',
     'ALWAYS_SEARCH_USER_PATHS' => 'NO',
   }
-  s.ios.deployment_target = '7.0'
-  s.osx.deployment_target = '10.9'
+  s.ios.deployment_target = '9.0'
+  s.osx.deployment_target = '10.10'
   s.tvos.deployment_target = '9.0'
   s.watchos.deployment_target = '2.0'
 """
diff --git a/abseil-cpp/absl/algorithm/BUILD.bazel b/abseil-cpp/absl/algorithm/BUILD.bazel
index a3002b7..3a9ab01 100644
--- a/abseil-cpp/absl/algorithm/BUILD.bazel
+++ b/abseil-cpp/absl/algorithm/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -44,12 +43,14 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":algorithm",
+        "//absl/base:config",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
-cc_test(
+cc_binary(
     name = "algorithm_benchmark",
+    testonly = 1,
     srcs = ["equal_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/abseil-cpp/absl/algorithm/CMakeLists.txt b/abseil-cpp/absl/algorithm/CMakeLists.txt
index 56cd0fb..181b49c 100644
--- a/abseil-cpp/absl/algorithm/CMakeLists.txt
+++ b/abseil-cpp/absl/algorithm/CMakeLists.txt
@@ -35,7 +35,8 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::algorithm
-    gmock_main
+    absl::config
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -65,5 +66,5 @@
     absl::core_headers
     absl::memory
     absl::span
-    gmock_main
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/algorithm/algorithm_test.cc b/abseil-cpp/absl/algorithm/algorithm_test.cc
index 81fccb6..d18df02 100644
--- a/abseil-cpp/absl/algorithm/algorithm_test.cc
+++ b/abseil-cpp/absl/algorithm/algorithm_test.cc
@@ -20,6 +20,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 
 namespace {
 
@@ -50,7 +51,15 @@
   std::vector<int> empty1;
   std::vector<int> empty2;
 
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105705
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnonnull"
+#endif
   EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end()));
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
   EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end()));
   EXPECT_TRUE(
       absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end()));
diff --git a/abseil-cpp/absl/algorithm/container.h b/abseil-cpp/absl/algorithm/container.h
index f0cee94..679e026 100644
--- a/abseil-cpp/absl/algorithm/container.h
+++ b/abseil-cpp/absl/algorithm/container.h
@@ -77,9 +77,8 @@
     decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
 
 template <typename C>
-using ContainerDifferenceType =
-    decltype(std::distance(std::declval<ContainerIter<C>>(),
-                           std::declval<ContainerIter<C>>()));
+using ContainerDifferenceType = decltype(std::distance(
+    std::declval<ContainerIter<C>>(), std::declval<ContainerIter<C>>()));
 
 template <typename C>
 using ContainerPointerType =
@@ -90,17 +89,21 @@
 // lookup of std::begin and std::end, i.e.
 //   using std::begin;
 //   using std::end;
-//   std::foo(begin(c), end(c);
+//   std::foo(begin(c), end(c));
 // becomes
 //   std::foo(container_algorithm_internal::begin(c),
-//   container_algorithm_internal::end(c));
+//            container_algorithm_internal::end(c));
 // These are meant for internal use only.
 
 template <typename C>
-ContainerIter<C> c_begin(C& c) { return begin(c); }
+ContainerIter<C> c_begin(C& c) {
+  return begin(c);
+}
 
 template <typename C>
-ContainerIter<C> c_end(C& c) { return end(c); }
+ContainerIter<C> c_end(C& c) {
+  return end(c);
+}
 
 template <typename T>
 struct IsUnorderedContainer : std::false_type {};
@@ -166,7 +169,7 @@
 // c_all_of()
 //
 // Container-based version of the <algorithm> `std::all_of()` function to
-// test a condition on all elements within a container.
+// test if all elements within a container satisfy a condition.
 template <typename C, typename Pred>
 bool c_all_of(const C& c, Pred&& pred) {
   return std::all_of(container_algorithm_internal::c_begin(c),
@@ -188,7 +191,7 @@
 // c_none_of()
 //
 // Container-based version of the <algorithm> `std::none_of()` function to
-// test if no elements in a container fulfil a condition.
+// test if no elements in a container fulfill a condition.
 template <typename C, typename Pred>
 bool c_none_of(const C& c, Pred&& pred) {
   return std::none_of(container_algorithm_internal::c_begin(c),
@@ -343,8 +346,8 @@
 // return the first element where two ordered containers differ. Applies `==` to
 // the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
 template <typename C1, typename C2>
-container_algorithm_internal::ContainerIterPairType<C1, C2>
-c_mismatch(C1& c1, C2& c2) {
+container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(C1& c1,
+                                                                       C2& c2) {
   auto first1 = container_algorithm_internal::c_begin(c1);
   auto last1 = container_algorithm_internal::c_end(c1);
   auto first2 = container_algorithm_internal::c_begin(c2);
@@ -365,8 +368,8 @@
 // the function's test condition. Applies `pred` to the first N elements of `c1`
 // and `c2`, where N = min(size(c1), size(c2)).
 template <typename C1, typename C2, typename BinaryPredicate>
-container_algorithm_internal::ContainerIterPairType<C1, C2>
-c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) {
+container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(
+    C1& c1, C2& c2, BinaryPredicate pred) {
   auto first1 = container_algorithm_internal::c_begin(c1);
   auto last1 = container_algorithm_internal::c_end(c1);
   auto first2 = container_algorithm_internal::c_begin(c2);
@@ -655,11 +658,10 @@
 // some condition, and return the results within an iterator.
 template <typename C, typename OutputIterator, typename Pred, typename T>
 OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred,
-                                 T&& new_value) {
+                                 const T& new_value) {
   return std::replace_copy_if(container_algorithm_internal::c_begin(c),
                               container_algorithm_internal::c_end(c), result,
-                              std::forward<Pred>(pred),
-                              std::forward<T>(new_value));
+                              std::forward<Pred>(pred), new_value);
 }
 
 // c_fill()
@@ -667,9 +669,9 @@
 // Container-based version of the <algorithm> `std::fill()` function to fill a
 // container with some value.
 template <typename C, typename T>
-void c_fill(C& c, T&& value) {
+void c_fill(C& c, const T& value) {
   std::fill(container_algorithm_internal::c_begin(c),
-            container_algorithm_internal::c_end(c), std::forward<T>(value));
+            container_algorithm_internal::c_end(c), value);
 }
 
 // c_fill_n()
@@ -677,9 +679,8 @@
 // Container-based version of the <algorithm> `std::fill_n()` function to fill
 // the first N elements in a container with some value.
 template <typename C, typename Size, typename T>
-void c_fill_n(C& c, Size n, T&& value) {
-  std::fill_n(container_algorithm_internal::c_begin(c), n,
-              std::forward<T>(value));
+void c_fill_n(C& c, Size n, const T& value) {
+  std::fill_n(container_algorithm_internal::c_begin(c), n, value);
 }
 
 // c_generate()
@@ -716,10 +717,11 @@
 // copy a container's elements while removing any elements matching the given
 // `value`.
 template <typename C, typename OutputIterator, typename T>
-OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) {
+OutputIterator c_remove_copy(const C& c, OutputIterator result,
+                             const T& value) {
   return std::remove_copy(container_algorithm_internal::c_begin(c),
                           container_algorithm_internal::c_end(c), result,
-                          std::forward<T>(value));
+                          value);
 }
 
 // c_remove_copy_if()
@@ -905,11 +907,11 @@
 
 // Overload of c_sort() for performing a `comp` comparison other than the
 // default `operator<`.
-template <typename C, typename Compare>
-void c_sort(C& c, Compare&& comp) {
+template <typename C, typename LessThan>
+void c_sort(C& c, LessThan&& comp) {
   std::sort(container_algorithm_internal::c_begin(c),
             container_algorithm_internal::c_end(c),
-            std::forward<Compare>(comp));
+            std::forward<LessThan>(comp));
 }
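
A minimal usage sketch of these comparator overloads, assuming only the absl::c_sort and absl::c_is_sorted entry points declared above:

    #include <cassert>
    #include <functional>
    #include <vector>

    #include "absl/algorithm/container.h"

    int main() {
      // Sort in descending order by passing a LessThan other than the default
      // operator<, then verify the ordering with the same comparator.
      std::vector<int> v = {3, 1, 2};
      absl::c_sort(v, std::greater<int>());
      assert(absl::c_is_sorted(v, std::greater<int>()));
      assert((v == std::vector<int>{3, 2, 1}));
      return 0;
    }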
 
 // c_stable_sort()
@@ -925,11 +927,11 @@
 
 // Overload of c_stable_sort() for performing a `comp` comparison other than the
 // default `operator<`.
-template <typename C, typename Compare>
-void c_stable_sort(C& c, Compare&& comp) {
+template <typename C, typename LessThan>
+void c_stable_sort(C& c, LessThan&& comp) {
   std::stable_sort(container_algorithm_internal::c_begin(c),
                    container_algorithm_internal::c_end(c),
-                   std::forward<Compare>(comp));
+                   std::forward<LessThan>(comp));
 }
 
 // c_is_sorted()
@@ -944,11 +946,11 @@
 
 // c_is_sorted() overload for performing a `comp` comparison other than the
 // default `operator<`.
-template <typename C, typename Compare>
-bool c_is_sorted(const C& c, Compare&& comp) {
+template <typename C, typename LessThan>
+bool c_is_sorted(const C& c, LessThan&& comp) {
   return std::is_sorted(container_algorithm_internal::c_begin(c),
                         container_algorithm_internal::c_end(c),
-                        std::forward<Compare>(comp));
+                        std::forward<LessThan>(comp));
 }
 
 // c_partial_sort()
@@ -966,14 +968,14 @@
 
 // Overload of c_partial_sort() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename RandomAccessContainer, typename Compare>
+template <typename RandomAccessContainer, typename LessThan>
 void c_partial_sort(
     RandomAccessContainer& sequence,
     container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
-    Compare&& comp) {
+    LessThan&& comp) {
   std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
                     container_algorithm_internal::c_end(sequence),
-                    std::forward<Compare>(comp));
+                    std::forward<LessThan>(comp));
 }
 
 // c_partial_sort_copy()
@@ -994,15 +996,15 @@
 
 // Overload of c_partial_sort_copy() for performing a `comp` comparison other
 // than the default `operator<`.
-template <typename C, typename RandomAccessContainer, typename Compare>
+template <typename C, typename RandomAccessContainer, typename LessThan>
 container_algorithm_internal::ContainerIter<RandomAccessContainer>
 c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
-                    Compare&& comp) {
+                    LessThan&& comp) {
   return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
                                 container_algorithm_internal::c_end(sequence),
                                 container_algorithm_internal::c_begin(result),
                                 container_algorithm_internal::c_end(result),
-                                std::forward<Compare>(comp));
+                                std::forward<LessThan>(comp));
 }
 
 // c_is_sorted_until()
@@ -1018,12 +1020,12 @@
 
 // Overload of c_is_sorted_until() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename C, typename Compare>
+template <typename C, typename LessThan>
 container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
-    C& c, Compare&& comp) {
+    C& c, LessThan&& comp) {
   return std::is_sorted_until(container_algorithm_internal::c_begin(c),
                               container_algorithm_internal::c_end(c),
-                              std::forward<Compare>(comp));
+                              std::forward<LessThan>(comp));
 }
 
 // c_nth_element()
@@ -1043,14 +1045,14 @@
 
 // Overload of c_nth_element() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename RandomAccessContainer, typename Compare>
+template <typename RandomAccessContainer, typename LessThan>
 void c_nth_element(
     RandomAccessContainer& sequence,
     container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
-    Compare&& comp) {
+    LessThan&& comp) {
   std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
                    container_algorithm_internal::c_end(sequence),
-                   std::forward<Compare>(comp));
+                   std::forward<LessThan>(comp));
 }
 
 //------------------------------------------------------------------------------
@@ -1064,20 +1066,19 @@
 // which does not compare less than `value`.
 template <typename Sequence, typename T>
 container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
-    Sequence& sequence, T&& value) {
+    Sequence& sequence, const T& value) {
   return std::lower_bound(container_algorithm_internal::c_begin(sequence),
-                          container_algorithm_internal::c_end(sequence),
-                          std::forward<T>(value));
+                          container_algorithm_internal::c_end(sequence), value);
 }
 
 // Overload of c_lower_bound() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename Sequence, typename T, typename Compare>
+template <typename Sequence, typename T, typename LessThan>
 container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
-    Sequence& sequence, T&& value, Compare&& comp) {
+    Sequence& sequence, const T& value, LessThan&& comp) {
   return std::lower_bound(container_algorithm_internal::c_begin(sequence),
-                          container_algorithm_internal::c_end(sequence),
-                          std::forward<T>(value), std::forward<Compare>(comp));
+                          container_algorithm_internal::c_end(sequence), value,
+                          std::forward<LessThan>(comp));
 }
 
 // c_upper_bound()
@@ -1087,20 +1088,19 @@
 // which is greater than `value`.
 template <typename Sequence, typename T>
 container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
-    Sequence& sequence, T&& value) {
+    Sequence& sequence, const T& value) {
   return std::upper_bound(container_algorithm_internal::c_begin(sequence),
-                          container_algorithm_internal::c_end(sequence),
-                          std::forward<T>(value));
+                          container_algorithm_internal::c_end(sequence), value);
 }
 
 // Overload of c_upper_bound() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename Sequence, typename T, typename Compare>
+template <typename Sequence, typename T, typename LessThan>
 container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
-    Sequence& sequence, T&& value, Compare&& comp) {
+    Sequence& sequence, const T& value, LessThan&& comp) {
   return std::upper_bound(container_algorithm_internal::c_begin(sequence),
-                          container_algorithm_internal::c_end(sequence),
-                          std::forward<T>(value), std::forward<Compare>(comp));
+                          container_algorithm_internal::c_end(sequence), value,
+                          std::forward<LessThan>(comp));
 }
 
 // c_equal_range()
@@ -1110,20 +1110,19 @@
 // sorted container which compare equal to `value`.
 template <typename Sequence, typename T>
 container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
-c_equal_range(Sequence& sequence, T&& value) {
+c_equal_range(Sequence& sequence, const T& value) {
   return std::equal_range(container_algorithm_internal::c_begin(sequence),
-                          container_algorithm_internal::c_end(sequence),
-                          std::forward<T>(value));
+                          container_algorithm_internal::c_end(sequence), value);
 }
 
 // Overload of c_equal_range() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename Sequence, typename T, typename Compare>
+template <typename Sequence, typename T, typename LessThan>
 container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
-c_equal_range(Sequence& sequence, T&& value, Compare&& comp) {
+c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
   return std::equal_range(container_algorithm_internal::c_begin(sequence),
-                          container_algorithm_internal::c_end(sequence),
-                          std::forward<T>(value), std::forward<Compare>(comp));
+                          container_algorithm_internal::c_end(sequence), value,
+                          std::forward<LessThan>(comp));
 }
 
 // c_binary_search()
@@ -1132,20 +1131,20 @@
 // to test if any element in the sorted container contains a value equivalent to
 // 'value'.
 template <typename Sequence, typename T>
-bool c_binary_search(Sequence&& sequence, T&& value) {
+bool c_binary_search(const Sequence& sequence, const T& value) {
   return std::binary_search(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence),
-                            std::forward<T>(value));
+                            value);
 }
 
 // Overload of c_binary_search() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename Sequence, typename T, typename Compare>
-bool c_binary_search(Sequence&& sequence, T&& value, Compare&& comp) {
+template <typename Sequence, typename T, typename LessThan>
+bool c_binary_search(const Sequence& sequence, const T& value,
+                     LessThan&& comp) {
   return std::binary_search(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence),
-                            std::forward<T>(value),
-                            std::forward<Compare>(comp));
+                            value, std::forward<LessThan>(comp));
 }
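
A short sketch of c_binary_search usage, assuming the declarations above:

    #include <cassert>
    #include <vector>

    #include "absl/algorithm/container.h"

    int main() {
      // The input must already be sorted; c_binary_search only reads it, so a
      // const container is fine.
      const std::vector<int> sorted = {1, 3, 5, 7};
      assert(absl::c_binary_search(sorted, 5));
      assert(!absl::c_binary_search(sorted, 4));
      return 0;
    }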
 
 //------------------------------------------------------------------------------
@@ -1166,14 +1165,14 @@
 
 // Overload of c_merge() for performing a `comp` comparison other than
 // the default `operator<`.
-template <typename C1, typename C2, typename OutputIterator, typename Compare>
+template <typename C1, typename C2, typename OutputIterator, typename LessThan>
 OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
-                       Compare&& comp) {
+                       LessThan&& comp) {
   return std::merge(container_algorithm_internal::c_begin(c1),
                     container_algorithm_internal::c_end(c1),
                     container_algorithm_internal::c_begin(c2),
                     container_algorithm_internal::c_end(c2), result,
-                    std::forward<Compare>(comp));
+                    std::forward<LessThan>(comp));
 }
 
 // c_inplace_merge()
@@ -1189,13 +1188,13 @@
 
 // Overload of c_inplace_merge() for performing a merge using a `comp` other
 // than `operator<`.
-template <typename C, typename Compare>
+template <typename C, typename LessThan>
 void c_inplace_merge(C& c,
                      container_algorithm_internal::ContainerIter<C> middle,
-                     Compare&& comp) {
+                     LessThan&& comp) {
   std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
                      container_algorithm_internal::c_end(c),
-                     std::forward<Compare>(comp));
+                     std::forward<LessThan>(comp));
 }
 
 // c_includes()
@@ -1213,13 +1212,13 @@
 
 // Overload of c_includes() for performing a merge using a `comp` other than
 // `operator<`.
-template <typename C1, typename C2, typename Compare>
-bool c_includes(const C1& c1, const C2& c2, Compare&& comp) {
+template <typename C1, typename C2, typename LessThan>
+bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) {
   return std::includes(container_algorithm_internal::c_begin(c1),
                        container_algorithm_internal::c_end(c1),
                        container_algorithm_internal::c_begin(c2),
                        container_algorithm_internal::c_end(c2),
-                       std::forward<Compare>(comp));
+                       std::forward<LessThan>(comp));
 }
 
 // c_set_union()
@@ -1243,7 +1242,7 @@
 
 // Overload of c_set_union() for performing a merge using a `comp` other than
 // `operator<`.
-template <typename C1, typename C2, typename OutputIterator, typename Compare,
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
@@ -1251,18 +1250,18 @@
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
 OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
-                           Compare&& comp) {
+                           LessThan&& comp) {
   return std::set_union(container_algorithm_internal::c_begin(c1),
                         container_algorithm_internal::c_end(c1),
                         container_algorithm_internal::c_begin(c2),
                         container_algorithm_internal::c_end(c2), output,
-                        std::forward<Compare>(comp));
+                        std::forward<LessThan>(comp));
 }
 
 // c_set_intersection()
 //
 // Container-based version of the <algorithm> `std::set_intersection()` function
-// to return an iterator containing the intersection of two containers.
+// to return an iterator containing the intersection of two sorted containers.
 template <typename C1, typename C2, typename OutputIterator,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
@@ -1272,6 +1271,11 @@
               void>::type>
 OutputIterator c_set_intersection(const C1& c1, const C2& c2,
                                   OutputIterator output) {
+  // In debug builds, ensure that both containers are sorted with respect to the
+  // default comparator. std::set_intersection requires the containers be sorted
+  // using operator<.
+  assert(absl::c_is_sorted(c1));
+  assert(absl::c_is_sorted(c2));
   return std::set_intersection(container_algorithm_internal::c_begin(c1),
                                container_algorithm_internal::c_end(c1),
                                container_algorithm_internal::c_begin(c2),
@@ -1280,7 +1284,7 @@
 
 // Overload of c_set_intersection() for performing a merge using a `comp` other
 // than `operator<`.
-template <typename C1, typename C2, typename OutputIterator, typename Compare,
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
@@ -1288,12 +1292,17 @@
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
 OutputIterator c_set_intersection(const C1& c1, const C2& c2,
-                                  OutputIterator output, Compare&& comp) {
+                                  OutputIterator output, LessThan&& comp) {
+  // In debug builds, ensure that both containers are sorted with respect to
+  // `comp`. std::set_intersection requires that both containers be sorted
+  // using the same comparator.
+  assert(absl::c_is_sorted(c1, comp));
+  assert(absl::c_is_sorted(c2, comp));
   return std::set_intersection(container_algorithm_internal::c_begin(c1),
                                container_algorithm_internal::c_end(c1),
                                container_algorithm_internal::c_begin(c2),
                                container_algorithm_internal::c_end(c2), output,
-                               std::forward<Compare>(comp));
+                               std::forward<LessThan>(comp));
 }
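
A minimal sketch of the sorted-input contract that the new assertions check, using the default operator< overload declared above:

    #include <cassert>
    #include <iterator>
    #include <vector>

    #include "absl/algorithm/container.h"

    int main() {
      // Both inputs must already be sorted with the comparator that will be
      // used (operator< here); in debug builds the added asserts enforce this.
      std::vector<int> a = {1, 2, 3, 5};
      std::vector<int> b = {2, 3, 4};
      std::vector<int> out;
      absl::c_set_intersection(a, b, std::back_inserter(out));
      assert((out == std::vector<int>{2, 3}));
      return 0;
    }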
 
 // c_set_difference()
@@ -1318,7 +1327,7 @@
 
 // Overload of c_set_difference() for performing a merge using a `comp` other
 // than `operator<`.
-template <typename C1, typename C2, typename OutputIterator, typename Compare,
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
@@ -1326,12 +1335,12 @@
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
 OutputIterator c_set_difference(const C1& c1, const C2& c2,
-                                OutputIterator output, Compare&& comp) {
+                                OutputIterator output, LessThan&& comp) {
   return std::set_difference(container_algorithm_internal::c_begin(c1),
                              container_algorithm_internal::c_end(c1),
                              container_algorithm_internal::c_begin(c2),
                              container_algorithm_internal::c_end(c2), output,
-                             std::forward<Compare>(comp));
+                             std::forward<LessThan>(comp));
 }
 
 // c_set_symmetric_difference()
@@ -1357,7 +1366,7 @@
 
 // Overload of c_set_symmetric_difference() for performing a merge using a
 // `comp` other than `operator<`.
-template <typename C1, typename C2, typename OutputIterator, typename Compare,
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
@@ -1366,13 +1375,13 @@
               void>::type>
 OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
                                           OutputIterator output,
-                                          Compare&& comp) {
+                                          LessThan&& comp) {
   return std::set_symmetric_difference(
       container_algorithm_internal::c_begin(c1),
       container_algorithm_internal::c_end(c1),
       container_algorithm_internal::c_begin(c2),
       container_algorithm_internal::c_end(c2), output,
-      std::forward<Compare>(comp));
+      std::forward<LessThan>(comp));
 }
 
 //------------------------------------------------------------------------------
@@ -1391,11 +1400,11 @@
 
 // Overload of c_push_heap() for performing a push operation on a heap using a
 // `comp` other than `operator<`.
-template <typename RandomAccessContainer, typename Compare>
-void c_push_heap(RandomAccessContainer& sequence, Compare&& comp) {
+template <typename RandomAccessContainer, typename LessThan>
+void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) {
   std::push_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence),
-                 std::forward<Compare>(comp));
+                 std::forward<LessThan>(comp));
 }
 
 // c_pop_heap()
@@ -1410,11 +1419,11 @@
 
 // Overload of c_pop_heap() for performing a pop operation on a heap using a
 // `comp` other than `operator<`.
-template <typename RandomAccessContainer, typename Compare>
-void c_pop_heap(RandomAccessContainer& sequence, Compare&& comp) {
+template <typename RandomAccessContainer, typename LessThan>
+void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) {
   std::pop_heap(container_algorithm_internal::c_begin(sequence),
                 container_algorithm_internal::c_end(sequence),
-                std::forward<Compare>(comp));
+                std::forward<LessThan>(comp));
 }
 
 // c_make_heap()
@@ -1429,11 +1438,11 @@
 
 // Overload of c_make_heap() for performing heap comparisons using a
 // `comp` other than `operator<`
-template <typename RandomAccessContainer, typename Compare>
-void c_make_heap(RandomAccessContainer& sequence, Compare&& comp) {
+template <typename RandomAccessContainer, typename LessThan>
+void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) {
   std::make_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence),
-                 std::forward<Compare>(comp));
+                 std::forward<LessThan>(comp));
 }
 
 // c_sort_heap()
@@ -1448,11 +1457,11 @@
 
 // Overload of c_sort_heap() for performing heap comparisons using a
 // `comp` other than `operator<`
-template <typename RandomAccessContainer, typename Compare>
-void c_sort_heap(RandomAccessContainer& sequence, Compare&& comp) {
+template <typename RandomAccessContainer, typename LessThan>
+void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) {
   std::sort_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence),
-                 std::forward<Compare>(comp));
+                 std::forward<LessThan>(comp));
 }
 
 // c_is_heap()
@@ -1467,11 +1476,11 @@
 
 // Overload of c_is_heap() for performing heap comparisons using a
 // `comp` other than `operator<`
-template <typename RandomAccessContainer, typename Compare>
-bool c_is_heap(const RandomAccessContainer& sequence, Compare&& comp) {
+template <typename RandomAccessContainer, typename LessThan>
+bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) {
   return std::is_heap(container_algorithm_internal::c_begin(sequence),
                       container_algorithm_internal::c_end(sequence),
-                      std::forward<Compare>(comp));
+                      std::forward<LessThan>(comp));
 }
 
 // c_is_heap_until()
@@ -1487,12 +1496,12 @@
 
 // Overload of c_is_heap_until() for performing heap comparisons using a
 // `comp` other than `operator<`
-template <typename RandomAccessContainer, typename Compare>
+template <typename RandomAccessContainer, typename LessThan>
 container_algorithm_internal::ContainerIter<RandomAccessContainer>
-c_is_heap_until(RandomAccessContainer& sequence, Compare&& comp) {
+c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
   return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence),
-                            std::forward<Compare>(comp));
+                            std::forward<LessThan>(comp));
 }
 
 //------------------------------------------------------------------------------
@@ -1513,12 +1522,12 @@
 
 // Overload of c_min_element() for performing a `comp` comparison other than
 // `operator<`.
-template <typename Sequence, typename Compare>
+template <typename Sequence, typename LessThan>
 container_algorithm_internal::ContainerIter<Sequence> c_min_element(
-    Sequence& sequence, Compare&& comp) {
+    Sequence& sequence, LessThan&& comp) {
   return std::min_element(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence),
-                          std::forward<Compare>(comp));
+                          std::forward<LessThan>(comp));
 }
 
 // c_max_element()
@@ -1535,12 +1544,12 @@
 
 // Overload of c_max_element() for performing a `comp` comparison other than
 // `operator<`.
-template <typename Sequence, typename Compare>
+template <typename Sequence, typename LessThan>
 container_algorithm_internal::ContainerIter<Sequence> c_max_element(
-    Sequence& sequence, Compare&& comp) {
+    Sequence& sequence, LessThan&& comp) {
   return std::max_element(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence),
-                          std::forward<Compare>(comp));
+                          std::forward<LessThan>(comp));
 }
 
 // c_minmax_element()
@@ -1550,20 +1559,20 @@
 // smallest and largest values, respectively, using `operator<` to make the
 // comparisons.
 template <typename C>
-container_algorithm_internal::ContainerIterPairType<C, C>
-c_minmax_element(C& c) {
+container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
+    C& c) {
   return std::minmax_element(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c));
 }
 
 // Overload of c_minmax_element() for performing `comp` comparisons other than
 // `operator<`.
-template <typename C, typename Compare>
-container_algorithm_internal::ContainerIterPairType<C, C>
-c_minmax_element(C& c, Compare&& comp) {
+template <typename C, typename LessThan>
+container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
+    C& c, LessThan&& comp) {
   return std::minmax_element(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c),
-                             std::forward<Compare>(comp));
+                             std::forward<LessThan>(comp));
 }
 
 //------------------------------------------------------------------------------
@@ -1578,7 +1587,8 @@
 // that capital letters ("A-Z") have ASCII values less than lowercase letters
 // ("a-z").
 template <typename Sequence1, typename Sequence2>
-bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) {
+bool c_lexicographical_compare(const Sequence1& sequence1,
+                               const Sequence2& sequence2) {
   return std::lexicographical_compare(
       container_algorithm_internal::c_begin(sequence1),
       container_algorithm_internal::c_end(sequence1),
@@ -1588,15 +1598,15 @@
 
 // Overload of c_lexicographical_compare() for performing a lexicographical
 // comparison using a `comp` operator instead of `operator<`.
-template <typename Sequence1, typename Sequence2, typename Compare>
-bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2,
-                               Compare&& comp) {
+template <typename Sequence1, typename Sequence2, typename LessThan>
+bool c_lexicographical_compare(const Sequence1& sequence1,
+                               const Sequence2& sequence2, LessThan&& comp) {
   return std::lexicographical_compare(
       container_algorithm_internal::c_begin(sequence1),
       container_algorithm_internal::c_end(sequence1),
       container_algorithm_internal::c_begin(sequence2),
       container_algorithm_internal::c_end(sequence2),
-      std::forward<Compare>(comp));
+      std::forward<LessThan>(comp));
 }
 
 // c_next_permutation()
@@ -1612,11 +1622,11 @@
 
 // Overload of c_next_permutation() for performing a lexicographical
 // comparison using a `comp` operator instead of `operator<`.
-template <typename C, typename Compare>
-bool c_next_permutation(C& c, Compare&& comp) {
+template <typename C, typename LessThan>
+bool c_next_permutation(C& c, LessThan&& comp) {
   return std::next_permutation(container_algorithm_internal::c_begin(c),
                                container_algorithm_internal::c_end(c),
-                               std::forward<Compare>(comp));
+                               std::forward<LessThan>(comp));
 }
 
 // c_prev_permutation()
@@ -1632,11 +1642,11 @@
 
 // Overload of c_prev_permutation() for performing a lexicographical
 // comparison using a `comp` operator instead of `operator<`.
-template <typename C, typename Compare>
-bool c_prev_permutation(C& c, Compare&& comp) {
+template <typename C, typename LessThan>
+bool c_prev_permutation(C& c, LessThan&& comp) {
   return std::prev_permutation(container_algorithm_internal::c_begin(c),
                                container_algorithm_internal::c_end(c),
-                               std::forward<Compare>(comp));
+                               std::forward<LessThan>(comp));
 }
 
 //------------------------------------------------------------------------------
@@ -1645,18 +1655,18 @@
 
 // c_iota()
 //
-// Container-based version of the <algorithm> `std::iota()` function
+// Container-based version of the <numeric> `std::iota()` function
 // to compute successive values of `value`, as if incremented with `++value`
 // after each element is written, and write them to the container.
 template <typename Sequence, typename T>
-void c_iota(Sequence& sequence, T&& value) {
+void c_iota(Sequence& sequence, const T& value) {
   std::iota(container_algorithm_internal::c_begin(sequence),
-            container_algorithm_internal::c_end(sequence),
-            std::forward<T>(value));
+            container_algorithm_internal::c_end(sequence), value);
 }
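
A minimal sketch of c_iota matching the description above:

    #include <cassert>
    #include <vector>

    #include "absl/algorithm/container.h"

    int main() {
      // Writes value, value + 1, value + 2, ... into the container.
      std::vector<int> v(4);
      absl::c_iota(v, 10);
      assert((v == std::vector<int>{10, 11, 12, 13}));
      return 0;
    }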
+
 // c_accumulate()
 //
-// Container-based version of the <algorithm> `std::accumulate()` function
+// Container-based version of the <numeric> `std::accumulate()` function
 // to accumulate the element values of a container to `init` and return that
 // accumulation by value.
 //
@@ -1683,7 +1693,7 @@
 
 // c_inner_product()
 //
-// Container-based version of the <algorithm> `std::inner_product()` function
+// Container-based version of the <numeric> `std::inner_product()` function
 // to compute the cumulative inner product of container element pairs.
 //
 // Note: Due to a language technicality this function has return type
@@ -1714,7 +1724,7 @@
 
 // c_adjacent_difference()
 //
-// Container-based version of the <algorithm> `std::adjacent_difference()`
+// Container-based version of the <numeric> `std::adjacent_difference()`
 // function to compute the difference between each element and the one preceding
 // it and write it to an iterator.
 template <typename InputSequence, typename OutputIt>
@@ -1737,7 +1747,7 @@
 
 // c_partial_sum()
 //
-// Container-based version of the <algorithm> `std::partial_sum()` function
+// Container-based version of the <numeric> `std::partial_sum()` function
 // to compute the partial sum of the elements in a sequence and write them
 // to an iterator. The partial sum is the sum of all element values so far in
 // the sequence.
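
For context, a minimal sketch of the container-based <numeric> wrappers documented above, using only the c_iota() and c_accumulate() signatures shown in this file (illustrative only, not part of the patch):

#include <vector>

#include "absl/algorithm/container.h"

int SumOneThroughFive() {
  std::vector<int> v(5);
  absl::c_iota(v, 1);               // v = {1, 2, 3, 4, 5}
  return absl::c_accumulate(v, 0);  // 15
}
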
diff --git a/abseil-cpp/absl/algorithm/container_test.cc b/abseil-cpp/absl/algorithm/container_test.cc
index 605afc8..0fbc777 100644
--- a/abseil-cpp/absl/algorithm/container_test.cc
+++ b/abseil-cpp/absl/algorithm/container_test.cc
@@ -67,13 +67,16 @@
 bool IsOdd(int x) { return x % 2 != 0; }
 
 TEST_F(NonMutatingTest, Distance) {
-  EXPECT_EQ(container_.size(), absl::c_distance(container_));
-  EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_));
-  EXPECT_EQ(vector_.size(), absl::c_distance(vector_));
-  EXPECT_EQ(ABSL_ARRAYSIZE(array_), absl::c_distance(array_));
+  EXPECT_EQ(container_.size(),
+            static_cast<size_t>(absl::c_distance(container_)));
+  EXPECT_EQ(sequence_.size(), static_cast<size_t>(absl::c_distance(sequence_)));
+  EXPECT_EQ(vector_.size(), static_cast<size_t>(absl::c_distance(vector_)));
+  EXPECT_EQ(ABSL_ARRAYSIZE(array_),
+            static_cast<size_t>(absl::c_distance(array_)));
 
   // Works with a temporary argument.
-  EXPECT_EQ(vector_.size(), absl::c_distance(std::vector<int>(vector_)));
+  EXPECT_EQ(vector_.size(),
+            static_cast<size_t>(absl::c_distance(std::vector<int>(vector_))));
 }
 
 TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) {
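
The static_cast<size_t> additions above address -Wsign-compare: absl::c_distance() returns the container's signed difference_type while size() is unsigned. A minimal illustration (not part of the patch):

#include <cstddef>
#include <vector>

#include "absl/algorithm/container.h"

bool SizeMatchesDistance(const std::vector<int>& v) {
  // difference_type (signed) vs. size_type (unsigned): cast before comparing.
  return static_cast<std::size_t>(absl::c_distance(v)) == v.size();
}
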
diff --git a/abseil-cpp/absl/algorithm/equal_benchmark.cc b/abseil-cpp/absl/algorithm/equal_benchmark.cc
index 7bf62c9..948cd65 100644
--- a/abseil-cpp/absl/algorithm/equal_benchmark.cc
+++ b/abseil-cpp/absl/algorithm/equal_benchmark.cc
@@ -15,8 +15,8 @@
 #include <cstdint>
 #include <cstring>
 
-#include "benchmark/benchmark.h"
 #include "absl/algorithm/algorithm.h"
+#include "benchmark/benchmark.h"
 
 namespace {
 
diff --git a/abseil-cpp/absl/base/BUILD.bazel b/abseil-cpp/absl/base/BUILD.bazel
index 9d96abe..fb008db 100644
--- a/abseil-cpp/absl/base/BUILD.bazel
+++ b/abseil-cpp/absl/base/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -64,6 +63,18 @@
 )
 
 cc_library(
+    name = "nullability",
+    srcs = ["internal/nullability_impl.h"],
+    hdrs = ["nullability.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":core_headers",
+        "//absl/meta:type_traits",
+    ],
+)
+
+cc_library(
     name = "raw_logging_internal",
     srcs = ["internal/raw_logging.cc"],
     hdrs = ["internal/raw_logging.h"],
@@ -76,6 +87,7 @@
         ":atomic_hook",
         ":config",
         ":core_headers",
+        ":errno_saver",
         ":log_severity",
     ],
 )
@@ -114,6 +126,23 @@
 )
 
 cc_library(
+    name = "cycleclock_internal",
+    hdrs = [
+        "internal/cycleclock_config.h",
+        "internal/unscaledcycleclock_config.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":base_internal",
+        ":config",
+    ],
+)
+
+cc_library(
     name = "dynamic_annotations",
     srcs = [
         "internal/dynamic_annotations.h",
@@ -158,9 +187,12 @@
         "internal/direct_mmap.h",
         "internal/low_level_alloc.h",
     ],
-    copts = ABSL_DEFAULT_COPTS,
+    copts = ABSL_DEFAULT_COPTS + select({
+        "//conditions:default": [],
+    }),
     linkopts = select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
@@ -220,9 +252,16 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
-        "//absl:windows": [
+        "//absl:msvc_compiler": [
             "-DEFAULTLIB:advapi32.lib",
         ],
+        "//absl:clang-cl_compiler": [
+            "-DEFAULTLIB:advapi32.lib",
+        ],
+        "//absl:mingw_compiler": [
+            "-DEFAULTLIB:advapi32.lib",
+            "-ladvapi32",
+        ],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
@@ -231,6 +270,7 @@
         ":base_internal",
         ":config",
         ":core_headers",
+        ":cycleclock_internal",
         ":dynamic_annotations",
         ":log_severity",
         ":raw_logging_internal",
@@ -429,6 +469,9 @@
     srcs = ["spinlock_test_common.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":base",
         ":base_internal",
@@ -479,6 +522,7 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":base",
         ":config",
         ":core_headers",
     ],
@@ -521,6 +565,16 @@
 )
 
 cc_test(
+    name = "nullability_test",
+    srcs = ["nullability_test.cc"],
+    deps = [
+        ":core_headers",
+        ":nullability",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "raw_logging_test",
     srcs = ["raw_logging_test.cc"],
     copts = ABSL_TEST_COPTS,
@@ -551,7 +605,10 @@
     srcs = ["internal/low_level_alloc_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = ["no_test_ios_x86_64"],
+    tags = [
+        "no_test_ios_x86_64",
+        "no_test_wasm",
+    ],
     deps = [
         ":malloc_internal",
         "//absl/container:node_hash_map",
@@ -564,6 +621,9 @@
     srcs = ["internal/thread_identity_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":base",
         ":core_headers",
@@ -587,100 +647,6 @@
 )
 
 cc_library(
-    name = "bits",
-    hdrs = ["internal/bits.h"],
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
-    deps = [
-        ":config",
-        ":core_headers",
-    ],
-)
-
-cc_test(
-    name = "bits_test",
-    size = "small",
-    srcs = ["internal/bits_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":bits",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_library(
-    name = "exponential_biased",
-    srcs = ["internal/exponential_biased.cc"],
-    hdrs = ["internal/exponential_biased.h"],
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
-    deps = [
-        ":config",
-        ":core_headers",
-    ],
-)
-
-cc_test(
-    name = "exponential_biased_test",
-    size = "small",
-    srcs = ["internal/exponential_biased_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
-    deps = [
-        ":exponential_biased",
-        "//absl/strings",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_library(
-    name = "periodic_sampler",
-    srcs = ["internal/periodic_sampler.cc"],
-    hdrs = ["internal/periodic_sampler.h"],
-    copts = ABSL_DEFAULT_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":core_headers",
-        ":exponential_biased",
-    ],
-)
-
-cc_test(
-    name = "periodic_sampler_test",
-    size = "small",
-    srcs = ["internal/periodic_sampler_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
-    deps = [
-        ":core_headers",
-        ":periodic_sampler",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_binary(
-    name = "periodic_sampler_benchmark",
-    testonly = 1,
-    srcs = ["internal/periodic_sampler_benchmark.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = ["benchmark"],
-    visibility = ["//visibility:private"],
-    deps = [
-        ":core_headers",
-        ":periodic_sampler",
-        "@com_github_google_benchmark//:benchmark_main",
-    ],
-)
-
-cc_library(
     name = "scoped_set_env",
     testonly = 1,
     srcs = ["internal/scoped_set_env.cc"],
@@ -790,6 +756,35 @@
     ],
 )
 
+cc_library(
+    name = "prefetch",
+    hdrs = [
+        "internal/prefetch.h",
+        "prefetch.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":core_headers",  # TODO(b/265984188): remove
+    ],
+)
+
+cc_test(
+    name = "prefetch_test",
+    size = "small",
+    srcs = [
+        "internal/prefetch_test.cc",
+        "prefetch_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":prefetch",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_test(
     name = "unique_small_name_test",
     size = "small",
diff --git a/abseil-cpp/absl/base/CMakeLists.txt b/abseil-cpp/absl/base/CMakeLists.txt
index 9ff5aa2..76c4ff1 100644
--- a/abseil-cpp/absl/base/CMakeLists.txt
+++ b/abseil-cpp/absl/base/CMakeLists.txt
@@ -16,6 +16,7 @@
 
 find_library(LIBRT rt)
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     atomic_hook
@@ -28,6 +29,7 @@
     ${ABSL_DEFAULT_COPTS}
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     errno_saver
@@ -54,6 +56,34 @@
 
 absl_cc_library(
   NAME
+    nullability
+  HDRS
+    "nullability.h"
+  SRCS
+    "internal/nullability_impl.h"
+  DEPS
+    absl::core_headers
+    absl::type_traits
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_test(
+  NAME
+    nullability_test
+  SRCS
+    "nullability_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::core_headers
+    absl::nullability
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
     raw_logging_internal
   HDRS
     "internal/raw_logging.h"
@@ -63,11 +93,13 @@
     absl::atomic_hook
     absl::config
     absl::core_headers
+    absl::errno_saver
     absl::log_severity
   COPTS
     ${ABSL_DEFAULT_COPTS}
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     spinlock_wait
@@ -131,6 +163,7 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     malloc_internal
@@ -151,6 +184,7 @@
     Threads::Threads
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     base_internal
@@ -174,6 +208,7 @@
     "call_once.h"
     "casts.h"
     "internal/cycleclock.h"
+    "internal/cycleclock_config.h"
     "internal/low_level_scheduling.h"
     "internal/per_thread_tls.h"
     "internal/spinlock.h"
@@ -181,6 +216,7 @@
     "internal/thread_identity.h"
     "internal/tsan_mutex_interface.h"
     "internal/unscaledcycleclock.h"
+    "internal/unscaledcycleclock_config.h"
   SRCS
     "internal/cycleclock.cc"
     "internal/spinlock.cc"
@@ -192,7 +228,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
     $<$<BOOL:${LIBRT}>:-lrt>
-    $<$<BOOL:${MINGW}>:"advapi32">
+    $<$<BOOL:${MINGW}>:-ladvapi32>
   DEPS
     absl::atomic_hook
     absl::base_internal
@@ -207,6 +243,7 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     throw_delegate
@@ -221,6 +258,7 @@
     absl::raw_logging_internal
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     exception_testing
@@ -230,10 +268,11 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::config
-    gtest
+    GTest::gtest
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     pretty_function
@@ -243,6 +282,7 @@
     ${ABSL_DEFAULT_COPTS}
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     exception_safety_testing
@@ -259,7 +299,7 @@
     absl::meta
     absl::strings
     absl::utility
-    gtest
+    GTest::gtest
   TESTONLY
 )
 
@@ -273,9 +313,10 @@
   DEPS
     absl::exception_safety_testing
     absl::memory
-    gtest_main
+    GTest::gtest_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     atomic_hook_test_helper
@@ -300,8 +341,8 @@
     absl::atomic_hook_test_helper
     absl::atomic_hook
     absl::core_headers
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -314,7 +355,7 @@
   DEPS
     absl::base
     absl::core_headers
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -327,8 +368,8 @@
   DEPS
     absl::errno_saver
     absl::strerror
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -342,7 +383,7 @@
     absl::base
     absl::config
     absl::throw_delegate
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -357,7 +398,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::base_internal
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -371,10 +412,11 @@
     absl::base_internal
     absl::memory
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     spinlock_test_common
@@ -388,7 +430,7 @@
     absl::base_internal
     absl::core_headers
     absl::synchronization
-    gtest
+    GTest::gtest
   TESTONLY
 )
 
@@ -406,9 +448,10 @@
     absl::config
     absl::core_headers
     absl::synchronization
-    gtest_main
+    GTest::gtest_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     endian
@@ -418,6 +461,7 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::base
     absl::config
     absl::core_headers
   PUBLIC
@@ -434,7 +478,7 @@
     absl::base
     absl::config
     absl::endian
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -447,7 +491,7 @@
   DEPS
     absl::config
     absl::synchronization
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -461,7 +505,7 @@
     absl::base
     absl::core_headers
     absl::synchronization
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -474,7 +518,7 @@
   DEPS
     absl::raw_logging_internal
     absl::strings
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -487,7 +531,7 @@
   DEPS
     absl::base
     absl::synchronization
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -515,87 +559,10 @@
     absl::core_headers
     absl::synchronization
     Threads::Threads
-    gtest_main
+    GTest::gtest_main
 )
 
-absl_cc_library(
-  NAME
-    bits
-  HDRS
-    "internal/bits.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::config
-    absl::core_headers
-)
-
-absl_cc_test(
-  NAME
-    bits_test
-  SRCS
-    "internal/bits_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::bits
-    gtest_main
-)
-
-absl_cc_library(
-  NAME
-    exponential_biased
-  SRCS
-    "internal/exponential_biased.cc"
-  HDRS
-    "internal/exponential_biased.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::config
-    absl::core_headers
-)
-
-absl_cc_test(
-  NAME
-    exponential_biased_test
-  SRCS
-    "internal/exponential_biased_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::exponential_biased
-    absl::strings
-    gmock_main
-)
-
-absl_cc_library(
-  NAME
-    periodic_sampler
-  SRCS
-    "internal/periodic_sampler.cc"
-  HDRS
-    "internal/periodic_sampler.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::core_headers
-    absl::exponential_biased
-)
-
-absl_cc_test(
-  NAME
-    periodic_sampler_test
-  SRCS
-    "internal/periodic_sampler_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::core_headers
-    absl::periodic_sampler
-    gmock_main
-)
-
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     scoped_set_env
@@ -619,7 +586,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::scoped_set_env
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -643,10 +610,11 @@
     absl::flags_marshalling
     absl::log_severity
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     strerror
@@ -674,10 +642,11 @@
   DEPS
     absl::strerror
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     fast_type_id
@@ -700,7 +669,35 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::fast_type_id
-    gtest_main
+    GTest::gtest_main
+)
+
+absl_cc_library(
+  NAME
+    prefetch
+  HDRS
+    "prefetch.h"
+    "internal/prefetch.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers  # TODO(b/265984188): remove
+)
+
+absl_cc_test(
+  NAME
+    prefetch_test
+  SRCS
+    "prefetch_test.cc"
+    "internal/prefetch_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::prefetch
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -713,5 +710,5 @@
   DEPS
     absl::core_headers
     absl::optional
-    gtest_main
+    GTest::gtest_main
 )
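
The prefetch target added above exposes cache-prefetch hints. A sketch under the assumption that prefetch.h declares absl::PrefetchToLocalCache(const void*) (illustrative only; the hint is a no-op on platforms without prefetch support):

#include <cstddef>
#include <vector>

#include "absl/base/prefetch.h"

long Sum(const std::vector<long>& v) {
  long total = 0;
  for (std::size_t i = 0; i < v.size(); ++i) {
    // Hint only: request the element a few iterations ahead of its use.
    if (i + 8 < v.size()) absl::PrefetchToLocalCache(&v[i + 8]);
    total += v[i];
  }
  return total;
}
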
diff --git a/abseil-cpp/absl/base/attributes.h b/abseil-cpp/absl/base/attributes.h
index 046fbea..a7f279a 100644
--- a/abseil-cpp/absl/base/attributes.h
+++ b/abseil-cpp/absl/base/attributes.h
@@ -18,8 +18,6 @@
 // These macros are used within Abseil and allow the compiler to optimize, where
 // applicable, certain function calls.
 //
-// This file is used for both C and C++!
-//
 // Most macros here are exposing GCC or Clang features, and are stubbed out for
 // other compilers.
 //
@@ -121,7 +119,7 @@
 #if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
 #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
 #define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
-#elif defined(__GNUC__) && !defined(__clang__)
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
 #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
 #define ABSL_ATTRIBUTE_NO_TAIL_CALL \
   __attribute__((optimize("no-optimize-sibling-calls")))
@@ -133,14 +131,15 @@
 // ABSL_ATTRIBUTE_WEAK
 //
 // Tags a function as weak for the purposes of compilation and linking.
-// Weak attributes currently do not work properly in LLVM's Windows backend,
-// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
+// Weak attributes did not work properly in LLVM's Windows backend before
+// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
 // for further information.
 // The MinGW compiler doesn't complain about the weak attribute until the link
 // step, presumably because Windows doesn't use ELF binaries.
-#if (ABSL_HAVE_ATTRIBUTE(weak) ||                   \
-     (defined(__GNUC__) && !defined(__clang__))) && \
-    !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__)
+#if (ABSL_HAVE_ATTRIBUTE(weak) ||                                         \
+     (defined(__GNUC__) && !defined(__clang__))) &&                       \
+    (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
+    !defined(__MINGW32__)
 #undef ABSL_ATTRIBUTE_WEAK
 #define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
 #define ABSL_HAVE_ATTRIBUTE_WEAK 1
@@ -212,8 +211,20 @@
 // out of bounds or does other scary things with memory.
 // NOTE: GCC supports AddressSanitizer(asan) since 4.8.
 // https://gcc.gnu.org/gcc-4.8/changes.html
-#if ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
 #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) && defined(_MSC_VER) && \
+    _MSC_VER >= 1928
+// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
+#elif defined(ABSL_HAVE_HWADDRESS_SANITIZER) && ABSL_HAVE_ATTRIBUTE(no_sanitize)
+// HWAddressSanitizer is a sanitizer similar to AddressSanitizer, which uses CPU
+// features to detect similar bugs with less CPU and memory overhead.
+// NOTE: GCC supports HWAddressSanitizer(hwasan) since 11.
+// https://gcc.gnu.org/gcc-11/changes.html
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS \
+  __attribute__((no_sanitize("hwaddress")))
 #else
 #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
 #endif
@@ -263,7 +274,7 @@
 //
 // Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
 // See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
-#if ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__llvm__)
 #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
 #else
 #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
@@ -283,10 +294,7 @@
 // ABSL_ATTRIBUTE_RETURNS_NONNULL
 //
 // Tells the compiler that a particular function never returns a null pointer.
-#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) || \
-    (defined(__GNUC__) && \
-     (__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \
-     !defined(__clang__))
+#if ABSL_HAVE_ATTRIBUTE(returns_nonnull)
 #define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
 #else
 #define ABSL_ATTRIBUTE_RETURNS_NONNULL
@@ -316,15 +324,22 @@
   __attribute__((section(#name))) __attribute__((noinline))
 #endif
 
-
 // ABSL_ATTRIBUTE_SECTION_VARIABLE
 //
 // Tells the compiler/linker to put a given variable into a section and define
 // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
 // This functionality is supported by GNU linker.
 #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo op, which includes an additional integer as part of its syntax
+// indicating alignment. If data falls under different alignments, you might
+// get a compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
 #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
 #endif
+#endif
 
 // ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
 //
@@ -335,8 +350,8 @@
 // a no-op on ELF but not on Mach-O.
 //
 #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
-#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
-  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK;    \
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)   \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
   extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
 #endif
 #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
@@ -397,6 +412,9 @@
 //
 // Tells the compiler to warn about unused results.
 //
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard `[[nodiscard]]` directly over this macro.
+//
 // When annotating a function, it must appear as the first part of the
 // declaration or definition. The compiler will warn if the return value from
 // such a function is unused:
@@ -423,9 +441,10 @@
 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
 //
 // Note: past advice was to place the macro after the argument list.
-#if ABSL_HAVE_ATTRIBUTE(nodiscard)
-#define ABSL_MUST_USE_RESULT [[nodiscard]]
-#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+//
+// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is
+// compliant with the stricter [[nodiscard]].
+#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
 #define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
 #else
 #define ABSL_MUST_USE_RESULT
@@ -495,7 +514,7 @@
 #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
 #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
 #define ABSL_XRAY_LOG_ARGS(N) \
-    [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+  [[clang::xray_always_instrument, clang::xray_log_args(N)]]
 #else
 #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
 #endif
@@ -526,6 +545,13 @@
 // ABSL_ATTRIBUTE_UNUSED
 //
 // Prevents the compiler from complaining about variables that appear unused.
+//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard '[[maybe_unused]]' directly over this macro.
+//
+// Due to differences in positioning requirements between the old, compiler
+// specific __attribute__ syntax and the now standard [[maybe_unused]], this
+// macro does not attempt to take advantage of '[[maybe_unused]]'.
 #if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
 #undef ABSL_ATTRIBUTE_UNUSED
 #define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
@@ -546,13 +572,19 @@
 // ABSL_ATTRIBUTE_PACKED
 //
 // Instructs the compiler not to use natural alignment for a tagged data
-// structure, but instead to reduce its alignment to 1. This attribute can
-// either be applied to members of a structure or to a structure in its
-// entirety. Applying this attribute (judiciously) to a structure in its
-// entirety to optimize the memory footprint of very commonly-used structs is
-// fine. Do not apply this attribute to a structure in its entirety if the
-// purpose is to control the offsets of the members in the structure. Instead,
-// apply this attribute only to structure members that need it.
+// structure, but instead to reduce its alignment to 1.
+//
+// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing
+// so can cause atomic variables to be mis-aligned and silently violate
+// atomicity on x86.
+//
+// This attribute can either be applied to members of a structure or to a
+// structure in its entirety. Applying this attribute (judiciously) to a
+// structure in its entirety to optimize the memory footprint of very
+// commonly-used structs is fine. Do not apply this attribute to a structure in
+// its entirety if the purpose is to control the offsets of the members in the
+// structure. Instead, apply this attribute only to structure members that need
+// it.
 //
 // When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the
 // natural alignment of structure members not annotated is preserved. Aligned
@@ -597,30 +629,24 @@
 //    case 42:
 //      ...
 //
-// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
-// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed
-// when  performing switch labels fall-through diagnostic
-// (`-Wimplicit-fallthrough`). See clang documentation on language extensions
-// for details:
+// Notes: When supported, GCC and Clang can issue a warning on switch labels
+// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See
+// clang documentation on language extensions for details:
 // https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
 //
-// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
-// has no effect on diagnostics. In any case this macro has no effect on runtime
+// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has
+// no effect on diagnostics. In any case this macro has no effect on runtime
 // behavior and performance of code.
+
 #ifdef ABSL_FALLTHROUGH_INTENDED
 #error "ABSL_FALLTHROUGH_INTENDED should not be defined."
-#endif
-
-// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
-#if defined(__clang__) && defined(__has_warning)
-#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough)
 #define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
-#endif
-#elif defined(__GNUC__) && __GNUC__ >= 7
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough)
 #define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
-#endif
-
-#ifndef ABSL_FALLTHROUGH_INTENDED
+#else
 #define ABSL_FALLTHROUGH_INTENDED \
   do {                            \
   } while (0)
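
With the macro now preferring the standard [[fallthrough]] attribute, an annotated switch that compiles cleanly under -Wimplicit-fallthrough looks like this (illustrative sketch, not part of the patch):

#include "absl/base/attributes.h"

int Classify(int x) {
  switch (x) {
    case 0:
    case 1:
      x += 10;
      ABSL_FALLTHROUGH_INTENDED;  // expands to [[fallthrough]] when available
    case 2:
      return x * 2;
    default:
      return x;
  }
}
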
@@ -632,6 +658,9 @@
 // declarations. The macro argument is used as a custom diagnostic message (e.g.
 // suggestion of a better alternative).
 //
+// For code or headers that are assured to only build with C++14 and up, prefer
+// just using the standard `[[deprecated("message")]]` directly over this macro.
+//
 // Examples:
 //
 //   class ABSL_DEPRECATED("Use Bar instead") Foo {...};
@@ -642,26 +671,60 @@
 //   ABSL_DEPRECATED("Use DoThat() instead")
 //   void DoThis();
 //
+//   enum FooEnum {
+//     kBar ABSL_DEPRECATED("Use kBaz instead"),
+//   };
+//
 // Every usage of a deprecated entity will trigger a warning when compiled with
-// clang's `-Wdeprecated-declarations` option. This option is turned off by
-// default, but the warnings will be reported by clang-tidy.
-#if defined(__clang__) && __cplusplus >= 201103L
+// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain
+// turns this warning off by default, instead relying on clang-tidy to report
+// new uses of deprecated code.
+#if ABSL_HAVE_ATTRIBUTE(deprecated)
 #define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
-#endif
-
-#ifndef ABSL_DEPRECATED
+#else
 #define ABSL_DEPRECATED(message)
 #endif
 
+// When deprecating Abseil code, it is sometimes necessary to turn off the
+// warning within Abseil, until the deprecated code is actually removed. The
+// deprecated code can be surrounded with these directives to achieve that
+// result.
+//
+// class ABSL_DEPRECATED("Use Bar instead") Foo;
+//
+// ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+// Baz ComputeBazFromFoo(Foo f);
+// ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#if defined(__GNUC__) || defined(__clang__)
+// Clang also supports these GCC pragmas.
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("GCC diagnostic push")             \
+  _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("GCC diagnostic pop")
+#else
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#endif  // defined(__GNUC__) || defined(__clang__)
+
 // ABSL_CONST_INIT
 //
 // A variable declaration annotated with the `ABSL_CONST_INIT` attribute will
 // not compile (on supported platforms) unless the variable has a constant
 // initializer. This is useful for variables with static and thread storage
 // duration, because it guarantees that they will not suffer from the so-called
-// "static init order fiasco".  Prefer to put this attribute on the most visible
-// declaration of the variable, if there's more than one, because code that
-// accesses the variable can then use the attribute for optimization.
+// "static init order fiasco".
+//
+// This attribute must be placed on the initializing declaration of the
+// variable. Some compilers will give a -Wmissing-constinit warning when this
+// attribute is placed on some other declaration but missing from the
+// initializing declaration.
+//
+// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can
+// also be used in a non-initializing declaration to tell the compiler that a
+// variable is already initialized, reducing overhead that would otherwise be
+// incurred by a hidden guard variable. Thus annotating all declarations with
+// this attribute is recommended to potentially enhance optimization.
 //
 // Example:
 //
@@ -670,13 +733,103 @@
 //     ABSL_CONST_INIT static MyType my_var;
 //   };
 //
-//   MyType MyClass::my_var = MakeMyType(...);
+//   ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...);
+//
+// For code or headers that are assured to only build with C++20 and up, prefer
+// just using the standard `constinit` keyword directly over this macro.
 //
 // Note that this attribute is redundant if the variable is declared constexpr.
-#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#if defined(__cpp_constinit) && __cpp_constinit >= 201907L
+#define ABSL_CONST_INIT constinit
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
 #define ABSL_CONST_INIT [[clang::require_constant_initialization]]
 #else
 #define ABSL_CONST_INIT
-#endif  // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#endif
+
+// These annotations are not available yet due to fear of breaking code.
+#define ABSL_ATTRIBUTE_PURE_FUNCTION
+#define ABSL_ATTRIBUTE_CONST_FUNCTION
+
+// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
+// parameter or implicit object parameter is retained by the return value of the
+// annotated function (or, for a parameter of a constructor, in the value of the
+// constructed object). This attribute causes warnings to be produced if a
+// temporary object does not live long enough.
+//
+// When applied to a reference parameter, the referenced object is assumed to be
+// retained by the return value of the function. When applied to a non-reference
+// parameter (for example, a pointer or a class type), all temporaries
+// referenced by the parameter are assumed to be retained by the return value of
+// the function.
+//
+// See also the upstream documentation:
+// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
+#elif ABSL_HAVE_ATTRIBUTE(lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
+#else
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND
+#endif
+
+// ABSL_ATTRIBUTE_TRIVIAL_ABI
+// Indicates that a type is "trivially relocatable" -- meaning it can be
+// relocated without invoking the constructor/destructor, using a form of move
+// elision.
+//
+// From a memory safety point of view, putting aside destructor ordering, it's
+// safe to apply ABSL_ATTRIBUTE_TRIVIAL_ABI if an object's location
+// can change over the course of its lifetime: if a constructor can be run one
+// place, and then the object magically teleports to another place where some
+// methods are run, and then the object teleports to yet another place where it
+// is destroyed. This is notably not true for self-referential types, where the
+// move-constructor must keep the self-reference up to date. If the type changed
+// location without invoking the move constructor, it would have a dangling
+// self-reference.
+//
+// The use of this teleporting machinery means that the number of paired
+// move/destroy operations can change, and so it is a bad idea to apply this to
+// a type meant to count the number of moves.
+//
+// Warning: applying this can, rarely, break callers. Objects passed by value
+// will be destroyed at the end of the call, instead of the end of the
+// full-expression containing the call. In addition, it changes the ABI
+// of functions accepting this type by value (e.g. to pass in registers).
+//
+// See also the upstream documentation:
+// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+//
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::trivial_abi)
+#define ABSL_ATTRIBUTE_TRIVIAL_ABI [[clang::trivial_abi]]
+#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1
+#elif ABSL_HAVE_ATTRIBUTE(trivial_abi)
+#define ABSL_ATTRIBUTE_TRIVIAL_ABI __attribute__((trivial_abi))
+#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1
+#else
+#define ABSL_ATTRIBUTE_TRIVIAL_ABI
+#endif
+
+// ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+//
+// Indicates a data member can be optimized to occupy no space (if it is empty)
+// and/or its tail padding can be used for other members.
+//
+// For code that is assured to only build with C++20 or later, prefer using
+// the standard attribute `[[no_unique_address]]` directly instead of this
+// macro.
+//
+// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#c20-no_unique_address
+// Current versions of MSVC have disabled `[[no_unique_address]]` since it
+// breaks ABI compatibility, but offers `[[msvc::no_unique_address]]` for
+// situations when it can be assured that it is desired. Since Abseil does not
+// claim ABI compatibility in mixed builds, we can offer it unconditionally.
+#if defined(_MSC_VER) && _MSC_VER >= 1929
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(no_unique_address)
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+#endif
 
 #endif  // ABSL_BASE_ATTRIBUTES_H_
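
Two of the attribute macros introduced in this file, shown in a minimal sketch (illustrative only, not part of the patch): ABSL_ATTRIBUTE_LIFETIME_BOUND makes Clang warn when the returned reference would dangle, and ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS lets an empty member share storage with its neighbors.

#include <vector>

#include "absl/base/attributes.h"

const int& First(const std::vector<int>& v ABSL_ATTRIBUTE_LIFETIME_BOUND) {
  return v.front();
}
// const int& bad = First(std::vector<int>{1, 2, 3});  // would warn: dangling

struct Empty {};
struct Packed {
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Empty tag;  // may occupy no storage
  int value;
};
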
diff --git a/abseil-cpp/absl/base/call_once.h b/abseil-cpp/absl/base/call_once.h
index 5b468af..08436ba 100644
--- a/abseil-cpp/absl/base/call_once.h
+++ b/abseil-cpp/absl/base/call_once.h
@@ -123,7 +123,7 @@
 
  private:
   base_internal::SchedulingMode mode_;
-  bool guard_result_;
+  bool guard_result_ = false;
 };
 
 // Bit patterns for call_once state machine values.  Internal implementation
@@ -177,15 +177,8 @@
                                   scheduling_mode) == kOnceInit) {
     base_internal::invoke(std::forward<Callable>(fn),
                           std::forward<Args>(args)...);
-    // The call to SpinLockWake below is an optimization, because the waiter
-    // in SpinLockWait is waiting with a short timeout. The atomic load/store
-    // sequence is slightly faster than an atomic exchange:
-    //   old_control = control->exchange(base_internal::kOnceDone,
-    //                                   std::memory_order_release);
-    // We opt for a slightly faster case when there are no waiters, in spite
-    // of longer tail latency when there are waiters.
-    old_control = control->load(std::memory_order_relaxed);
-    control->store(base_internal::kOnceDone, std::memory_order_release);
+    old_control =
+        control->exchange(base_internal::kOnceDone, std::memory_order_release);
     if (old_control == base_internal::kOnceWaiter) {
       base_internal::SpinLockWake(control, true);
     }
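
The hunk above changes only the internal wake-up path (an atomic exchange instead of a relaxed load plus release store); the public API is unchanged. A minimal usage sketch (not part of the patch):

#include "absl/base/call_once.h"

absl::once_flag init_flag;
int* g_table = nullptr;

void InitTable() { g_table = new int[256](); }

int* GetTable() {
  absl::call_once(init_flag, InitTable);  // InitTable runs at most once
  return g_table;
}
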
diff --git a/abseil-cpp/absl/base/casts.h b/abseil-cpp/absl/base/casts.h
index 83c6912..d195888 100644
--- a/abseil-cpp/absl/base/casts.h
+++ b/abseil-cpp/absl/base/casts.h
@@ -29,6 +29,10 @@
 #include <type_traits>
 #include <utility>
 
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit>  // For std::bit_cast.
+#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
 #include "absl/base/internal/identity.h"
 #include "absl/base/macros.h"
 #include "absl/meta/type_traits.h"
@@ -36,19 +40,6 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-namespace internal_casts {
-
-template <class Dest, class Source>
-struct is_bitcastable
-    : std::integral_constant<
-          bool,
-          sizeof(Dest) == sizeof(Source) &&
-              type_traits_internal::is_trivially_copyable<Source>::value &&
-              type_traits_internal::is_trivially_copyable<Dest>::value &&
-              std::is_default_constructible<Dest>::value> {};
-
-}  // namespace internal_casts
-
 // implicit_cast()
 //
 // Performs an implicit conversion between types following the language
@@ -105,81 +96,83 @@
 
 // bit_cast()
 //
-// Performs a bitwise cast on a type without changing the underlying bit
-// representation of that type's value. The two types must be of the same size
-// and both types must be trivially copyable. As with most casts, use with
-// caution. A `bit_cast()` might be needed when you need to temporarily treat a
-// type as some other type, such as in the following cases:
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
 //
-//    * Serialization (casting temporarily to `char *` for those purposes is
-//      always allowed by the C++ standard)
-//    * Managing the individual bits of a type within mathematical operations
-//      that are not normally accessible through that type
-//    * Casting non-pointer types to pointer types (casting the other way is
-//      allowed by `reinterpret_cast()` but round-trips cannot occur the other
-//      way).
-//
-// Example:
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
 //
 //   float f = 3.14159265358979;
-//   int i = bit_cast<int32_t>(f);
+//   int i = bit_cast<int>(f);
 //   // i = 0x40490fdb
 //
-// Casting non-pointer types to pointer types and then dereferencing them
-// traditionally produces undefined behavior.
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
 //
 // Example:
 //
 //   // WRONG
-//   float f = 3.14159265358979;            // WRONG
-//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//   float f = 3.14159265358979;
+//   int i = reinterpret_cast<int&>(f);    // Wrong
+//   int j = *reinterpret_cast<int*>(&f);  // Equally wrong
+//   int k = *bit_cast<int*>(&f);          // Equally wrong
 //
-// The address-casting method produces undefined behavior according to the ISO
-// C++ specification section [basic.lval]. Roughly, this section says: if an
-// object in memory has one type, and a program accesses it with a different
-// type, the result is undefined behavior for most values of "different type".
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most values of "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
 //
 // Such casting results in type punning: holding an object in memory of one type
 // and reading its bits back using a different type. A `bit_cast()` avoids this
-// issue by implementing its casts using `memcpy()`, which avoids introducing
-// this undefined behavior.
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
 //
-// NOTE: The requirements here are more strict than the bit_cast of standard
-// proposal p0476 due to the need for workarounds and lack of intrinsics.
-// Specifically, this implementation also requires `Dest` to be
-// default-constructible.
+// The requirements of `absl::bit_cast` are more strict than that of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
 template <
     typename Dest, typename Source,
-    typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
+    typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+                                std::is_trivially_copyable<Source>::value &&
+                                std::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                                && std::is_default_constructible<Dest>::value
+#endif  // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                            ,
                             int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+  return __builtin_bit_cast(Dest, source);
+}
+#else  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
 inline Dest bit_cast(const Source& source) {
   Dest dest;
   memcpy(static_cast<void*>(std::addressof(dest)),
          static_cast<const void*>(std::addressof(source)), sizeof(dest));
   return dest;
 }
+#endif  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
 
-// NOTE: This overload is only picked if the requirements of bit_cast are
-// not met. It is therefore UB, but is provided temporarily as previous
-// versions of this function template were unchecked. Do not use this in
-// new code.
-template <
-    typename Dest, typename Source,
-    typename std::enable_if<
-        !internal_casts::is_bitcastable<Dest, Source>::value,
-        int>::type = 0>
-ABSL_DEPRECATED(
-    "absl::bit_cast type requirements were violated. Update the types "
-    "being used such that they are the same size and are both "
-    "TriviallyCopyable.")
-inline Dest bit_cast(const Source& source) {
-  static_assert(sizeof(Dest) == sizeof(Source),
-                "Source and destination types should have equal sizes.");
-
-  Dest dest;
-  memcpy(&dest, &source, sizeof(dest));
-  return dest;
-}
+#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
 
 ABSL_NAMESPACE_END
 }  // namespace absl
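
The rewritten bit_cast() forwards to std::bit_cast or __builtin_bit_cast when available and otherwise falls back to memcpy(). A value-level usage sketch (illustrative only, not part of the patch):

#include <cstdint>

#include "absl/base/casts.h"

std::uint32_t FloatBits(float f) {
  return absl::bit_cast<std::uint32_t>(f);  // copies the object representation
}

float FloatFromBits(std::uint32_t bits) {
  return absl::bit_cast<float>(bits);  // exact round trip
}
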
diff --git a/abseil-cpp/absl/base/config.h b/abseil-cpp/absl/base/config.h
index c1d0494..1de7993 100644
--- a/abseil-cpp/absl/base/config.h
+++ b/abseil-cpp/absl/base/config.h
@@ -56,6 +56,25 @@
 #include <cstddef>
 #endif  // __cplusplus
 
+// ABSL_INTERNAL_CPLUSPLUS_LANG
+//
+// MSVC does not set the value of __cplusplus correctly, but instead uses
+// _MSVC_LANG as a stand-in.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+//
+// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at
+// times, for example:
+// https://github.com/microsoft/vscode-cpptools/issues/1770
+// https://reviews.llvm.org/D70996
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(_MSVC_LANG)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
+#elif defined(__cplusplus)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
+#endif
+
 #if defined(__APPLE__)
 // Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
 // __IPHONE_8_0.
@@ -66,6 +85,35 @@
 #include "absl/base/options.h"
 #include "absl/base/policy_checks.h"
 
+// Abseil long-term support (LTS) releases will define
+// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the
+// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the
+// integer representing the patch-level for that release.
+//
+// For example, for LTS release version "20300401.2", this would give us
+// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2
+//
+// These symbols will not be defined in non-LTS code.
+//
+// Abseil recommends that clients live-at-head. Therefore, if you are using
+// these symbols to assert a minimum version requirement, we recommend you do it
+// as
+//
+// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401
+// #error Project foo requires Abseil LTS version >= 20300401
+// #endif
+//
+// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes
+// live-at-head clients from the minimum version assertion.
+//
+// See https://abseil.io/about/releases for more information on Abseil release
+// management.
+//
+// LTS releases can be obtained from
+// https://github.com/abseil/abseil-cpp/releases.
+#define ABSL_LTS_RELEASE_VERSION 20230802
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
+
 // Helper macro to convert a CPP variable to a string literal.
 #define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
 #define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x)
@@ -121,10 +169,16 @@
 #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
 #define ABSL_NAMESPACE_BEGIN
 #define ABSL_NAMESPACE_END
+#define ABSL_INTERNAL_C_SYMBOL(x) x
 #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
 #define ABSL_NAMESPACE_BEGIN \
   inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
 #define ABSL_NAMESPACE_END }
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
+#define ABSL_INTERNAL_C_SYMBOL(x) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
 #else
 #error options.h is misconfigured.
 #endif
@@ -148,39 +202,43 @@
 #define ABSL_HAVE_BUILTIN(x) 0
 #endif
 
-#if defined(__is_identifier)
-#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x))
-#else
-#define ABSL_INTERNAL_HAS_KEYWORD(x) 0
-#endif
-
 #ifdef __has_feature
 #define ABSL_HAVE_FEATURE(f) __has_feature(f)
 #else
 #define ABSL_HAVE_FEATURE(f) 0
 #endif
 
+// Portable check for GCC minimum version:
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \
+  (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+#else
+#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0
+#endif
+
+#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__)
+#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \
+  (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y))
+#else
+#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0
+#endif
+
 // ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
-// We assume __thread is supported on Linux when compiled with Clang or compiled
-// against libstdc++ with _GLIBCXX_HAVE_TLS defined.
+// We assume __thread is supported on Linux or Asylo when compiled with Clang or
+// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
 #ifdef ABSL_HAVE_TLS
 #error ABSL_HAVE_TLS cannot be directly set
-#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#elif (defined(__linux__) || defined(__ASYLO__)) && \
+    (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
 #define ABSL_HAVE_TLS 1
 #endif
 
 // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
 //
 // Checks whether `std::is_trivially_destructible<T>` is supported.
-//
-// Notes: All supported compilers using libc++ support this feature, as does
-// gcc >= 4.8.1 using libstdc++, and Visual Studio.
 #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
 #error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
-#elif defined(_LIBCPP_VERSION) ||                                        \
-    (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \
-     (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) ||        \
-    defined(_MSC_VER)
 #define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
 #endif
 
@@ -188,35 +246,27 @@
 //
 // Checks whether `std::is_trivially_default_constructible<T>` and
 // `std::is_trivially_copy_constructible<T>` are supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#endif
 
 // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
 //
 // Checks whether `std::is_trivially_copy_assignable<T>` is supported.
-
-// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with
-// either libc++ or libstdc++, and Visual Studio (but not NVCC).
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
-#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
-#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
-#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set
-#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) ||        \
-    (!defined(__clang__) && defined(__GNUC__) &&                 \
-     (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \
-     (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) ||      \
-    (defined(_MSC_VER) && !defined(__NVCC__))
-#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#else
 #define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
 #endif
 
-// ABSL_HAVE_SOURCE_LOCATION_CURRENT
+// ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
 //
-// Indicates whether `absl::SourceLocation::current()` will return useful
-// information in some contexts.
-#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT
-#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \
-    ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
-#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
-#endif
+// Checks whether `std::is_trivially_copyable<T>` is supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set
+#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
 #endif
 
 // ABSL_HAVE_THREAD_LOCAL
@@ -311,25 +361,21 @@
 // For further details, consult the compiler's documentation.
 #ifdef ABSL_HAVE_EXCEPTIONS
 #error ABSL_HAVE_EXCEPTIONS cannot be directly set.
-
-#elif defined(__clang__)
-
-#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)
+#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6)
 // Clang >= 3.6
 #if ABSL_HAVE_FEATURE(cxx_exceptions)
 #define ABSL_HAVE_EXCEPTIONS 1
 #endif  // ABSL_HAVE_FEATURE(cxx_exceptions)
-#else
+#elif defined(__clang__)
 // Clang < 3.6
 // http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
 #if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
 #define ABSL_HAVE_EXCEPTIONS 1
 #endif  // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
-#endif  // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)
-
 // Handle remaining special cases and default to exceptions being supported.
-#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) &&    \
-    !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \
+#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \
+    !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) &&                        \
+      !defined(__cpp_exceptions)) &&                                      \
     !(defined(_MSC_VER) && !defined(_CPPUNWIND))
 #define ABSL_HAVE_EXCEPTIONS 1
 #endif
@@ -361,10 +407,12 @@
 // POSIX.1-2001.
 #ifdef ABSL_HAVE_MMAP
 #error ABSL_HAVE_MMAP cannot be directly set
-#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||   \
-    defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
-    defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
-    defined(__ASYLO__)
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(_AIX) || defined(__ros__) || defined(__native_client__) ||    \
+    defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) ||    \
+    defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) ||       \
+    defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) ||  \
+    defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
 #define ABSL_HAVE_MMAP 1
 #endif
 
@@ -375,17 +423,28 @@
 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
 #error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
 #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
-    defined(__ros__)
+    defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) ||          \
+    defined(__NetBSD__) || defined(__VXWORKS__)
 #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
 #endif
 
+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
 // ABSL_HAVE_SCHED_YIELD
 //
 // Checks whether the platform implements sched_yield(2) as defined in
 // POSIX.1-2001.
 #ifdef ABSL_HAVE_SCHED_YIELD
 #error ABSL_HAVE_SCHED_YIELD cannot be directly set
-#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \
+    defined(__VXWORKS__)
 #define ABSL_HAVE_SCHED_YIELD 1
 #endif
 
@@ -400,7 +459,7 @@
 // platforms.
 #ifdef ABSL_HAVE_SEMAPHORE_H
 #error ABSL_HAVE_SEMAPHORE_H cannot be directly set
-#elif defined(__linux__) || defined(__ros__)
+#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__)
 #define ABSL_HAVE_SEMAPHORE_H 1
 #endif
 
@@ -428,6 +487,8 @@
 #elif defined(__Fuchsia__)
 // Signals don't exist on fuchsia.
 #elif defined(__native_client__)
+// Signals don't exist on hexagon/QuRT
+#elif defined(__hexagon__)
 #else
 // other standard libraries
 #define ABSL_HAVE_ALARM 1
@@ -461,22 +522,29 @@
 #error "absl endian detection needs to be set up for your compiler"
 #endif
 
-// macOS 10.13 and iOS 10.11 don't let you use <any>, <optional>, or <variant>
-// even though the headers exist and are publicly noted to work.  See
+// macOS < 10.13 and iOS < 12 don't support <any>, <optional>, or <variant>
+// because the libc++ shared library shipped on the system doesn't have the
+// requisite exported symbols.  See
 // https://github.com/abseil/abseil-cpp/issues/207 and
 // https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+//
 // libc++ spells out the availability requirements in the file
 // llvm-project/libcxx/include/__config via the #define
-// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
-#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
-  ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
-   __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
-  (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
-   __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
-  (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
-   __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
-  (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
-   __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. The set of versions has been
+// modified a few times, via
+// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
+// and
+// https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a
+// The second commit has the correct versions, so that is what we copy here.
+#if defined(__APPLE__) &&                                           \
+    ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) &&     \
+      __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) ||    \
+     (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) &&    \
+      __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) ||   \
+     (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) &&     \
+      __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) ||     \
+     (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) &&        \
+      __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
 #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
 #else
 #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
@@ -484,71 +552,44 @@
 
 // ABSL_HAVE_STD_ANY
 //
-// Checks whether C++17 std::any is available by checking whether <any> exists.
+// Checks whether C++17 std::any is available.
 #ifdef ABSL_HAVE_STD_ANY
 #error "ABSL_HAVE_STD_ANY cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<any>) && __cplusplus >= 201703L && \
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
     !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_ANY 1
 #endif
-#endif
 
 // ABSL_HAVE_STD_OPTIONAL
 //
 // Checks whether C++17 std::optional is available.
 #ifdef ABSL_HAVE_STD_OPTIONAL
 #error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<optional>) && __cplusplus >= 201703L && \
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&  \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
     !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_OPTIONAL 1
 #endif
-#endif
 
 // ABSL_HAVE_STD_VARIANT
 //
 // Checks whether C++17 std::variant is available.
 #ifdef ABSL_HAVE_STD_VARIANT
 #error "ABSL_HAVE_STD_VARIANT cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<variant>) && __cplusplus >= 201703L && \
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
     !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_VARIANT 1
 #endif
-#endif
 
 // ABSL_HAVE_STD_STRING_VIEW
 //
 // Checks whether C++17 std::string_view is available.
 #ifdef ABSL_HAVE_STD_STRING_VIEW
 #error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<string_view>) && __cplusplus >= 201703L
-#define ABSL_HAVE_STD_STRING_VIEW 1
-#endif
-#endif
-
-// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
-// the support for <optional>, <any>, <string_view>, <variant>. So we use
-// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
-// <string_view>, <variant> is implemented) or higher. Also, `__cplusplus` is
-// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
-// version.
-// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
-    ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
-// #define ABSL_HAVE_STD_ANY 1
-#define ABSL_HAVE_STD_OPTIONAL 1
-#define ABSL_HAVE_STD_VARIANT 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
 #define ABSL_HAVE_STD_STRING_VIEW 1
 #endif
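Editor's note: a minimal sketch of how such availability macros are typically consumed downstream; the alias name maybe_string_view is illustrative, not Abseil's.

#include "absl/base/config.h"
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
using maybe_string_view = std::string_view;
#else
#include "absl/strings/string_view.h"
using maybe_string_view = absl::string_view;
#endif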
 
@@ -644,8 +685,6 @@
 #endif
 #endif
 
-#undef ABSL_INTERNAL_HAS_KEYWORD
-
 // ABSL_DLL
 //
 // When building Abseil as a DLL, this macro expands to `__declspec(dllexport)`
@@ -665,18 +704,24 @@
 #define ABSL_DLL
 #endif  // defined(_MSC_VER)
 
+#if defined(_MSC_VER)
+#if defined(ABSL_BUILD_TEST_DLL)
+#define ABSL_TEST_DLL __declspec(dllexport)
+#elif defined(ABSL_CONSUME_TEST_DLL)
+#define ABSL_TEST_DLL __declspec(dllimport)
+#else
+#define ABSL_TEST_DLL
+#endif
+#else
+#define ABSL_TEST_DLL
+#endif  // defined(_MSC_VER)
+
 // ABSL_HAVE_MEMORY_SANITIZER
 //
 // MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of
 // a compiler instrumentation module and a run-time library.
 #ifdef ABSL_HAVE_MEMORY_SANITIZER
 #error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
-#elif defined(MEMORY_SANITIZER)
-// The MEMORY_SANITIZER macro is deprecated but we will continue to honor it
-// for now.
-#define ABSL_HAVE_MEMORY_SANITIZER 1
-#elif defined(__SANITIZE_MEMORY__)
-#define ABSL_HAVE_MEMORY_SANITIZER 1
 #elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer)
 #define ABSL_HAVE_MEMORY_SANITIZER 1
 #endif
@@ -686,10 +731,6 @@
 // ThreadSanitizer (TSan) is a fast data race detector.
 #ifdef ABSL_HAVE_THREAD_SANITIZER
 #error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set."
-#elif defined(THREAD_SANITIZER)
-// The THREAD_SANITIZER macro is deprecated but we will continue to honor it
-// for now.
-#define ABSL_HAVE_THREAD_SANITIZER 1
 #elif defined(__SANITIZE_THREAD__)
 #define ABSL_HAVE_THREAD_SANITIZER 1
 #elif ABSL_HAVE_FEATURE(thread_sanitizer)
@@ -701,14 +742,196 @@
 // AddressSanitizer (ASan) is a fast memory error detector.
 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
 #error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set."
-#elif defined(ADDRESS_SANITIZER)
-// The ADDRESS_SANITIZER macro is deprecated but we will continue to honor it
-// for now.
-#define ABSL_HAVE_ADDRESS_SANITIZER 1
 #elif defined(__SANITIZE_ADDRESS__)
 #define ABSL_HAVE_ADDRESS_SANITIZER 1
 #elif ABSL_HAVE_FEATURE(address_sanitizer)
 #define ABSL_HAVE_ADDRESS_SANITIZER 1
 #endif
 
+// ABSL_HAVE_HWADDRESS_SANITIZER
+//
+// Hardware-Assisted AddressSanitizer (or HWASAN) is a memory error detector
+// similar to ASan but faster, using CPU features such as ARM TBI, Intel LAM,
+// or AMD UAI.
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_HWADDRESS__)
+#define ABSL_HAVE_HWADDRESS_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer)
+#define ABSL_HAVE_HWADDRESS_SANITIZER 1
+#endif
+
+// ABSL_HAVE_DATAFLOW_SANITIZER
+//
+// Dataflow Sanitizer (or DFSAN) is a generalised dynamic data flow analysis.
+#ifdef ABSL_HAVE_DATAFLOW_SANITIZER
+#error "ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set."
+#elif defined(DATAFLOW_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// DataFlowSanitizer (-fsanitize=dataflow), so GCC users of -fsanitize=dataflow
+// should also use -DDATAFLOW_SANITIZER.
+#define ABSL_HAVE_DATAFLOW_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(dataflow_sanitizer)
+#define ABSL_HAVE_DATAFLOW_SANITIZER 1
+#endif
+
+// ABSL_HAVE_LEAK_SANITIZER
+//
+// LeakSanitizer (or lsan) is a detector of memory leaks.
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time
+// whether the LeakSanitizer is potentially available. However, just because the
+// LeakSanitizer is available does not mean it is active. Use the
+// always-available run-time interface in //absl/debugging/leak_check.h for
+// interacting with LeakSanitizer.
+#ifdef ABSL_HAVE_LEAK_SANITIZER
+#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
+#elif defined(LEAK_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also
+// use -DLEAK_SANITIZER.
+#define ABSL_HAVE_LEAK_SANITIZER 1
+// Clang standalone LeakSanitizer (-fsanitize=leak)
+#elif ABSL_HAVE_FEATURE(leak_sanitizer)
+#define ABSL_HAVE_LEAK_SANITIZER 1
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER)
+// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer.
+#define ABSL_HAVE_LEAK_SANITIZER 1
+#endif
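Editor's note: a hedged usage sketch (the helper name is made up). The run-time interface referenced above stays linkable whether or not ABSL_HAVE_LEAK_SANITIZER is defined, so intentional leaks can be annotated unconditionally.

#include <cstddef>
#include <new>
#include "absl/debugging/leak_check.h"

// Allocate a block that is deliberately never freed; IgnoreLeak() keeps a
// potentially active LeakSanitizer from reporting it.
void* AllocateIntentionalLeak(size_t n) {
  return absl::IgnoreLeak(::operator new(n));
}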
+
+// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+//
+// Class template argument deduction is a language feature added in C++17.
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
+#elif defined(__cpp_deduction_guides)
+#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#endif
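Editor's note: a quick illustration of the language feature this macro gates (example only, not Abseil code).

#include <utility>
#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
std::pair p{1, 2.5};               // deduced as std::pair<int, double>
#else
std::pair<int, double> p{1, 2.5};  // template arguments must be spelled out
#endif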
+
+// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+//
+// Prior to C++17, static constexpr variables defined in classes required a
+// separate definition outside of the class body, for example:
+//
+// class Foo {
+//   static constexpr int kBar = 0;
+// };
+// constexpr int Foo::kBar;
+//
+// In C++17, these variables defined in classes are considered inline variables,
+// and the extra declaration is redundant. Since some compilers warn on the
+// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used
+// to emit them only where they are still required:
+//
+// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// constexpr int Foo::kBar;
+// #endif
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
+#endif
+
+// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with
+// RTTI support.
+#ifdef ABSL_INTERNAL_HAS_RTTI
+#error ABSL_INTERNAL_HAS_RTTI cannot be directly set
+#elif (defined(__GNUC__) && defined(__GXX_RTTI)) || \
+    (defined(_MSC_VER) && defined(_CPPRTTI)) ||     \
+    (!defined(__GNUC__) && !defined(_MSC_VER))
+#define ABSL_INTERNAL_HAS_RTTI 1
+#endif  // !defined(__GNUC__) || defined(__GXX_RTTI)
+
+// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE
+#error ABSL_INTERNAL_HAVE_SSE cannot be directly set
+#elif defined(__SSE__)
+#define ABSL_INTERNAL_HAVE_SSE 1
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \
+    !defined(_M_ARM64EC)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
+// indicates that at least SSE was targeted with the /arch:SSE option.
+// All x86-64 processors support SSE, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
+#elif defined(__SSE2__)
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \
+    !defined(_M_ARM64EC)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
+// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
+// All x86-64 processors support SSE2, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+//
+// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3
+// with MSVC requires either assuming that the code will only ever run on CPUs
+// that support SSSE3, or using __cpuid() to detect support at runtime and
+// fall back to a non-SSSE3 implementation when SSSE3 is unsupported by the
+// CPU.
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set
+#elif defined(__SSSE3__)
+#define ABSL_INTERNAL_HAVE_SSSE3 1
+#endif
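Editor's note: a hedged sketch of the run-time fallback described above for MSVC. The helper name is made up; CPUID leaf 1, ECX bit 9 is the documented SSSE3 feature bit.

#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
// Returns true when the executing CPU reports SSSE3 support.
inline bool CpuSupportsSsse3() {
  int regs[4] = {0, 0, 0, 0};
  __cpuid(regs, 1);                  // leaf 1: processor info and feature bits
  return (regs[2] & (1 << 9)) != 0;  // ECX bit 9 == SSSE3
}
#endif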
+
+// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM
+// SIMD).
+//
+// If __CUDA_ARCH__ is defined, then we are compiling CUDA code in device mode.
+// In device mode, NEON intrinsics are not available, regardless of host
+// platform.
+// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
+#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__)
+#define ABSL_INTERNAL_HAVE_ARM_NEON 1
+#endif
+
+// ABSL_HAVE_CONSTANT_EVALUATED is used for compile-time detection of
+// constant evaluation support through `absl::is_constant_evaluated`.
+#ifdef ABSL_HAVE_CONSTANT_EVALUATED
+#error ABSL_HAVE_CONSTANT_EVALUATED cannot be directly set
+#endif
+#ifdef __cpp_lib_is_constant_evaluated
+#define ABSL_HAVE_CONSTANT_EVALUATED 1
+#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
+#define ABSL_HAVE_CONSTANT_EVALUATED 1
+#endif
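Editor's note: a small sketch of how such a macro is typically consumed; the wrapper below is illustrative, not the Abseil implementation.

#include <type_traits>

constexpr bool InConstantEvaluation() {
#if defined(ABSL_HAVE_CONSTANT_EVALUATED) && \
    defined(__cpp_lib_is_constant_evaluated)
  return std::is_constant_evaluated();
#elif defined(ABSL_HAVE_CONSTANT_EVALUATED)
  return __builtin_is_constant_evaluated();
#else
  return false;  // No reliable signal; conservatively assume run time.
#endif
}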
+
+// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
+// into an integer that can be compared against.
+#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#error ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set
+#endif
+#ifdef __EMSCRIPTEN__
+#include <emscripten/version.h>
+#ifdef __EMSCRIPTEN_major__
+#if __EMSCRIPTEN_minor__ >= 1000
+#error __EMSCRIPTEN_minor__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#if __EMSCRIPTEN_tiny__ >= 1000
+#error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#define ABSL_INTERNAL_EMSCRIPTEN_VERSION                          \
+  ((__EMSCRIPTEN_major__)*1000000 + (__EMSCRIPTEN_minor__)*1000 + \
+   (__EMSCRIPTEN_tiny__))
+#endif
+#endif
+
 #endif  // ABSL_BASE_CONFIG_H_
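Editor's note: with the packing above (major*1000000 + minor*1000 + tiny), Emscripten 3.1.20 becomes 3001020, so a version guard might read:

#if defined(ABSL_INTERNAL_EMSCRIPTEN_VERSION) && \
    ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001020
// Code that depends on Emscripten >= 3.1.20 would go here.
#endif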
diff --git a/abseil-cpp/absl/base/dynamic_annotations.h b/abseil-cpp/absl/base/dynamic_annotations.h
index 545f8cb..7ba8912 100644
--- a/abseil-cpp/absl/base/dynamic_annotations.h
+++ b/abseil-cpp/absl/base/dynamic_annotations.h
@@ -46,6 +46,7 @@
 #define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
 
 #include <stddef.h>
+#include <stdint.h>
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
@@ -53,6 +54,10 @@
 #include "absl/base/macros.h"
 #endif
 
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
 // TODO(rogeeff): Remove after the backward compatibility period.
 #include "absl/base/internal/dynamic_annotations.h"  // IWYU pragma: export
 
@@ -110,6 +115,9 @@
 // Define race annotations.
 
 #if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
 
 // -------------------------------------------------------------
 // Annotations that suppress errors. It is usually better to express the
@@ -286,17 +294,22 @@
 // Define IGNORE_READS_BEGIN/_END annotations.
 
 #if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
 
 // Request the analysis tool to ignore all reads in the current thread until
 // ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
 // reads, while still checking other reads and all writes.
 // See also ABSL_ANNOTATE_UNPROTECTED_READ.
-#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()              \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+  (__FILE__, __LINE__)
 
 // Stop ignoring reads.
-#define ABSL_ANNOTATE_IGNORE_READS_END() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_END()              \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+  (__FILE__, __LINE__)
 
 // Function prototypes of annotations provided by the compiler-based sanitizer
 // implementation.
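Editor's note: a brief usage sketch for the ABSL_ANNOTATE_IGNORE_READS_BEGIN/END macros above (variable and function names are illustrative). Bracketing an intentionally racy read keeps ThreadSanitizer from reporting it while other accesses stay checked.

int g_counter;  // written concurrently elsewhere; the race is intentional

int ReadCounterRacily() {
  ABSL_ANNOTATE_IGNORE_READS_BEGIN();
  int v = g_counter;
  ABSL_ANNOTATE_IGNORE_READS_END();
  return v;
}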
@@ -316,16 +329,22 @@
 // TODO(delesley) -- The exclusive lock here ignores writes as well, but
 // allows IGNORE_READS_AND_WRITES to work properly.
 
-#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()                          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(                                      \
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
+  ()
 
-#define ABSL_ANNOTATE_IGNORE_READS_END() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+#define ABSL_ANNOTATE_IGNORE_READS_END()                          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(                                    \
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
+  ()
 
-ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsBegin()
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalAnnotateIgnoreReadsBegin)()
     ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}
 
-ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsEnd()
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalAnnotateIgnoreReadsEnd)()
     ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}
 
 #else
@@ -419,31 +438,6 @@
 
 #endif
 
-#ifdef __cplusplus
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-ABSL_INTERNAL_BEGIN_EXTERN_C
-int RunningOnValgrind();
-double ValgrindSlowdown();
-ABSL_INTERNAL_END_EXTERN_C
-#else
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-ABSL_DEPRECATED(
-    "Don't use this interface. It is misleading and is being deleted.")
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline int RunningOnValgrind() { return 0; }
-ABSL_DEPRECATED(
-    "Don't use this interface. It is misleading and is being deleted.")
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline double ValgrindSlowdown() { return 1.0; }
-}  // namespace base_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-using absl::base_internal::RunningOnValgrind;
-using absl::base_internal::ValgrindSlowdown;
-#endif
-#endif
-
 // -------------------------------------------------------------------------
 // Address sanitizer annotations
 
@@ -457,7 +451,7 @@
   __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
 #define ABSL_ADDRESS_SANITIZER_REDZONE(name) \
   struct {                                   \
-    char x[8] __attribute__((aligned(8)));   \
+    alignas(8) char x[8];                    \
   } name
 
 #else
@@ -468,6 +462,26 @@
 #endif  // ABSL_HAVE_ADDRESS_SANITIZER
 
 // -------------------------------------------------------------------------
+// HWAddress sanitizer annotations
+
+#ifdef __cplusplus
+namespace absl {
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+// Under HWASAN, this changes the tag of the pointer.
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t tag) {
+  return reinterpret_cast<T*>(__hwasan_tag_pointer(ptr, tag));
+}
+#else
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t) {
+  return ptr;
+}
+#endif
+}  // namespace absl
+#endif
+
+// -------------------------------------------------------------------------
 // Undefine the macros intended only for this file.
 
 #undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
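Editor's note: a hedged sketch for the HwasanTagPointer helper added above. It only rewrites the pointer's top-byte tag (and is the identity when HWASAN is off), so the example deliberately does not dereference the retagged pointer.

#include <cstdint>
#include "absl/base/dynamic_annotations.h"

char* RetagForDebugging(char* p, uintptr_t tag) {
  // With HWASAN enabled this returns p with its tag byte replaced by `tag`;
  // otherwise it returns p unchanged.
  return absl::HwasanTagPointer(p, tag);
}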
diff --git a/abseil-cpp/absl/base/exception_safety_testing_test.cc b/abseil-cpp/absl/base/exception_safety_testing_test.cc
index a59be29..bf5aa7c 100644
--- a/abseil-cpp/absl/base/exception_safety_testing_test.cc
+++ b/abseil-cpp/absl/base/exception_safety_testing_test.cc
@@ -148,7 +148,7 @@
   ThrowingValue<> bomb1, bomb2;
 
   TestOp([&bomb1]() { ~bomb1; });
-  TestOp([&]() { bomb1& bomb2; });
+  TestOp([&]() { bomb1 & bomb2; });
   TestOp([&]() { bomb1 | bomb2; });
   TestOp([&]() { bomb1 ^ bomb2; });
 }
@@ -332,13 +332,16 @@
   constexpr int kArrayLen = 2;
   // We intentionally create extra space to store the tag allocated by placement
   // new[].
-  constexpr int kStorageLen = 4;
+  constexpr size_t kExtraSpaceLen = sizeof(size_t) * 2;
 
   alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)];
   alignas(ThrowingValue<>) unsigned char
-      array_buf[sizeof(ThrowingValue<>[kStorageLen])];
+      array_buf[kExtraSpaceLen + sizeof(ThrowingValue<>[kArrayLen])];
   auto* placed = new (&buf) ThrowingValue<>(1);
   auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
+  auto* placed_array_end = reinterpret_cast<unsigned char*>(placed_array) +
+                           sizeof(ThrowingValue<>[kArrayLen]);
+  EXPECT_LE(placed_array_end, array_buf + sizeof(array_buf));
 
   SetCountdown();
   ExpectNoThrow([placed, &buf]() {
@@ -701,7 +704,10 @@
 
   static constexpr int kExceptionSentinel = 9999;
 };
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel;
+#endif
 
 TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) {
   auto tester_with_val =
diff --git a/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h b/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h
index 3e72b49..c72015e 100644
--- a/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h
+++ b/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
-#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
 
 #include "absl/base/internal/atomic_hook.h"
 
@@ -31,4 +31,4 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
diff --git a/abseil-cpp/absl/base/internal/bits.h b/abseil-cpp/absl/base/internal/bits.h
deleted file mode 100644
index 81648e2..0000000
--- a/abseil-cpp/absl/base/internal/bits.h
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_BITS_H_
-#define ABSL_BASE_INTERNAL_BITS_H_
-
-// This file contains bitwise ops which are implementation details of various
-// absl libraries.
-
-#include <cstdint>
-
-#include "absl/base/config.h"
-
-// Clang on Windows has __builtin_clzll; otherwise we need to use the
-// windows intrinsic functions.
-#if defined(_MSC_VER) && !defined(__clang__)
-#include <intrin.h>
-#if defined(_M_X64)
-#pragma intrinsic(_BitScanReverse64)
-#pragma intrinsic(_BitScanForward64)
-#endif
-#pragma intrinsic(_BitScanReverse)
-#pragma intrinsic(_BitScanForward)
-#endif
-
-#include "absl/base/attributes.h"
-
-#if defined(_MSC_VER) && !defined(__clang__)
-// We can achieve something similar to attribute((always_inline)) with MSVC by
-// using the __forceinline keyword, however this is not perfect. MSVC is
-// much less aggressive about inlining, and even with the __forceinline keyword.
-#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline
-#else
-// Use default attribute inline.
-#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE
-#endif
-
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
-  int zeroes = 60;
-  if (n >> 32) {
-    zeroes -= 32;
-    n >>= 32;
-  }
-  if (n >> 16) {
-    zeroes -= 16;
-    n >>= 16;
-  }
-  if (n >> 8) {
-    zeroes -= 8;
-    n >>= 8;
-  }
-  if (n >> 4) {
-    zeroes -= 4;
-    n >>= 4;
-  }
-  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
-#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
-  // MSVC does not have __buitin_clzll. Use _BitScanReverse64.
-  unsigned long result = 0;  // NOLINT(runtime/int)
-  if (_BitScanReverse64(&result, n)) {
-    return 63 - result;
-  }
-  return 64;
-#elif defined(_MSC_VER) && !defined(__clang__)
-  // MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse
-  unsigned long result = 0;  // NOLINT(runtime/int)
-  if ((n >> 32) &&
-      _BitScanReverse(&result, static_cast<unsigned long>(n >> 32))) {
-    return 31 - result;
-  }
-  if (_BitScanReverse(&result, static_cast<unsigned long>(n))) {
-    return 63 - result;
-  }
-  return 64;
-#elif defined(__GNUC__) || defined(__clang__)
-  // Use __builtin_clzll, which uses the following instructions:
-  //  x86: bsr
-  //  ARM64: clz
-  //  PPC: cntlzd
-  static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
-                "__builtin_clzll does not take 64-bit arg");
-
-  // Handle 0 as a special case because __builtin_clzll(0) is undefined.
-  if (n == 0) {
-    return 64;
-  }
-  return __builtin_clzll(n);
-#else
-  return CountLeadingZeros64Slow(n);
-#endif
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
-  int zeroes = 28;
-  if (n >> 16) {
-    zeroes -= 16;
-    n >>= 16;
-  }
-  if (n >> 8) {
-    zeroes -= 8;
-    n >>= 8;
-  }
-  if (n >> 4) {
-    zeroes -= 4;
-    n >>= 4;
-  }
-  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
-#if defined(_MSC_VER) && !defined(__clang__)
-  unsigned long result = 0;  // NOLINT(runtime/int)
-  if (_BitScanReverse(&result, n)) {
-    return 31 - result;
-  }
-  return 32;
-#elif defined(__GNUC__) || defined(__clang__)
-  // Use __builtin_clz, which uses the following instructions:
-  //  x86: bsr
-  //  ARM64: clz
-  //  PPC: cntlzd
-  static_assert(sizeof(int) == sizeof(n),
-                "__builtin_clz does not take 32-bit arg");
-
-  // Handle 0 as a special case because __builtin_clz(0) is undefined.
-  if (n == 0) {
-    return 32;
-  }
-  return __builtin_clz(n);
-#else
-  return CountLeadingZeros32Slow(n);
-#endif
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
-  int c = 63;
-  n &= ~n + 1;
-  if (n & 0x00000000FFFFFFFF) c -= 32;
-  if (n & 0x0000FFFF0000FFFF) c -= 16;
-  if (n & 0x00FF00FF00FF00FF) c -= 8;
-  if (n & 0x0F0F0F0F0F0F0F0F) c -= 4;
-  if (n & 0x3333333333333333) c -= 2;
-  if (n & 0x5555555555555555) c -= 1;
-  return c;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
-#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
-  unsigned long result = 0;  // NOLINT(runtime/int)
-  _BitScanForward64(&result, n);
-  return result;
-#elif defined(_MSC_VER) && !defined(__clang__)
-  unsigned long result = 0;  // NOLINT(runtime/int)
-  if (static_cast<uint32_t>(n) == 0) {
-    _BitScanForward(&result, static_cast<unsigned long>(n >> 32));
-    return result + 32;
-  }
-  _BitScanForward(&result, static_cast<unsigned long>(n));
-  return result;
-#elif defined(__GNUC__) || defined(__clang__)
-  static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
-                "__builtin_ctzll does not take 64-bit arg");
-  return __builtin_ctzll(n);
-#else
-  return CountTrailingZerosNonZero64Slow(n);
-#endif
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
-  int c = 31;
-  n &= ~n + 1;
-  if (n & 0x0000FFFF) c -= 16;
-  if (n & 0x00FF00FF) c -= 8;
-  if (n & 0x0F0F0F0F) c -= 4;
-  if (n & 0x33333333) c -= 2;
-  if (n & 0x55555555) c -= 1;
-  return c;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
-#if defined(_MSC_VER) && !defined(__clang__)
-  unsigned long result = 0;  // NOLINT(runtime/int)
-  _BitScanForward(&result, n);
-  return result;
-#elif defined(__GNUC__) || defined(__clang__)
-  static_assert(sizeof(int) == sizeof(n),
-                "__builtin_ctz does not take 32-bit arg");
-  return __builtin_ctz(n);
-#else
-  return CountTrailingZerosNonZero32Slow(n);
-#endif
-}
-
-#undef ABSL_BASE_INTERNAL_FORCEINLINE
-
-}  // namespace base_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_BASE_INTERNAL_BITS_H_
diff --git a/abseil-cpp/absl/base/internal/bits_test.cc b/abseil-cpp/absl/base/internal/bits_test.cc
deleted file mode 100644
index 7855fa6..0000000
--- a/abseil-cpp/absl/base/internal/bits_test.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/bits.h"
-
-#include "gtest/gtest.h"
-
-namespace {
-
-int CLZ64(uint64_t n) {
-  int fast = absl::base_internal::CountLeadingZeros64(n);
-  int slow = absl::base_internal::CountLeadingZeros64Slow(n);
-  EXPECT_EQ(fast, slow) << n;
-  return fast;
-}
-
-TEST(BitsTest, CountLeadingZeros64) {
-  EXPECT_EQ(64, CLZ64(uint64_t{}));
-  EXPECT_EQ(0, CLZ64(~uint64_t{}));
-
-  for (int index = 0; index < 64; index++) {
-    uint64_t x = static_cast<uint64_t>(1) << index;
-    const auto cnt = 63 - index;
-    ASSERT_EQ(cnt, CLZ64(x)) << index;
-    ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index;
-  }
-}
-
-int CLZ32(uint32_t n) {
-  int fast = absl::base_internal::CountLeadingZeros32(n);
-  int slow = absl::base_internal::CountLeadingZeros32Slow(n);
-  EXPECT_EQ(fast, slow) << n;
-  return fast;
-}
-
-TEST(BitsTest, CountLeadingZeros32) {
-  EXPECT_EQ(32, CLZ32(uint32_t{}));
-  EXPECT_EQ(0, CLZ32(~uint32_t{}));
-
-  for (int index = 0; index < 32; index++) {
-    uint32_t x = static_cast<uint32_t>(1) << index;
-    const auto cnt = 31 - index;
-    ASSERT_EQ(cnt, CLZ32(x)) << index;
-    ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index;
-    ASSERT_EQ(CLZ64(x), CLZ32(x) + 32);
-  }
-}
-
-int CTZ64(uint64_t n) {
-  int fast = absl::base_internal::CountTrailingZerosNonZero64(n);
-  int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n);
-  EXPECT_EQ(fast, slow) << n;
-  return fast;
-}
-
-TEST(BitsTest, CountTrailingZerosNonZero64) {
-  EXPECT_EQ(0, CTZ64(~uint64_t{}));
-
-  for (int index = 0; index < 64; index++) {
-    uint64_t x = static_cast<uint64_t>(1) << index;
-    const auto cnt = index;
-    ASSERT_EQ(cnt, CTZ64(x)) << index;
-    ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index;
-  }
-}
-
-int CTZ32(uint32_t n) {
-  int fast = absl::base_internal::CountTrailingZerosNonZero32(n);
-  int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n);
-  EXPECT_EQ(fast, slow) << n;
-  return fast;
-}
-
-TEST(BitsTest, CountTrailingZerosNonZero32) {
-  EXPECT_EQ(0, CTZ32(~uint32_t{}));
-
-  for (int index = 0; index < 32; index++) {
-    uint32_t x = static_cast<uint32_t>(1) << index;
-    const auto cnt = index;
-    ASSERT_EQ(cnt, CTZ32(x)) << index;
-    ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index;
-  }
-}
-
-
-}  // namespace
diff --git a/abseil-cpp/absl/base/internal/cycleclock.cc b/abseil-cpp/absl/base/internal/cycleclock.cc
index 0e65005..902e3f5 100644
--- a/abseil-cpp/absl/base/internal/cycleclock.cc
+++ b/abseil-cpp/absl/base/internal/cycleclock.cc
@@ -25,6 +25,8 @@
 #include <atomic>
 #include <chrono>  // NOLINT(build/c++11)
 
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/unscaledcycleclock.h"
 
 namespace absl {
@@ -33,44 +35,20 @@
 
 #if ABSL_USE_UNSCALED_CYCLECLOCK
 
-namespace {
-
-#ifdef NDEBUG
-#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-// Not debug mode and the UnscaledCycleClock frequency is the CPU
-// frequency.  Scale the CycleClock to prevent overflow if someone
-// tries to represent the time as cycles since the Unix epoch.
-static constexpr int32_t kShift = 1;
-#else
-// Not debug mode and the UnscaledCycleClock isn't operating at the
-// raw CPU frequency. There is no need to do any scaling, so don't
-// needlessly sacrifice precision.
-static constexpr int32_t kShift = 0;
-#endif
-#else
-// In debug mode use a different shift to discourage depending on a
-// particular shift value.
-static constexpr int32_t kShift = 2;
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int32_t CycleClock::kShift;
+constexpr double CycleClock::kFrequencyScale;
 #endif
 
-static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
-static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
+    CycleClock::cycle_clock_source_{nullptr};
 
-CycleClockSourceFunc LoadCycleClockSource() {
-  // Optimize for the common case (no callback) by first doing a relaxed load;
-  // this is significantly faster on non-x86 platforms.
-  if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
-    return nullptr;
-  }
-  // This corresponds to the store(std::memory_order_release) in
-  // CycleClockSource::Register, and makes sure that any updates made prior to
-  // registering the callback are visible to this thread before the callback is
-  // invoked.
-  return cycle_clock_source.load(std::memory_order_acquire);
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+  // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+  CycleClock::cycle_clock_source_.store(source, std::memory_order_release);
 }
 
-}  // namespace
-
+#ifdef _WIN32
 int64_t CycleClock::Now() {
   auto fn = LoadCycleClockSource();
   if (fn == nullptr) {
@@ -78,15 +56,7 @@
   }
   return fn() >> kShift;
 }
-
-double CycleClock::Frequency() {
-  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
-}
-
-void CycleClockSource::Register(CycleClockSourceFunc source) {
-  // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
-  cycle_clock_source.store(source, std::memory_order_release);
-}
+#endif
 
 #else
 
diff --git a/abseil-cpp/absl/base/internal/cycleclock.h b/abseil-cpp/absl/base/internal/cycleclock.h
index a18b584..cbfdf57 100644
--- a/abseil-cpp/absl/base/internal/cycleclock.h
+++ b/abseil-cpp/absl/base/internal/cycleclock.h
@@ -42,14 +42,20 @@
 #ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
 #define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
 
+#include <atomic>
 #include <cstdint>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/cycleclock_config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
+using CycleClockSourceFunc = int64_t (*)();
+
 // -----------------------------------------------------------------------------
 // CycleClock
 // -----------------------------------------------------------------------------
@@ -68,12 +74,21 @@
   static double Frequency();
 
  private:
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+  static CycleClockSourceFunc LoadCycleClockSource();
+
+  static constexpr int32_t kShift = kCycleClockShift;
+  static constexpr double kFrequencyScale = kCycleClockFrequencyScale;
+
+  ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif  //  ABSL_USE_UNSCALED_CYCLECLOCK
+
   CycleClock() = delete;  // no instances
   CycleClock(const CycleClock&) = delete;
   CycleClock& operator=(const CycleClock&) = delete;
-};
 
-using CycleClockSourceFunc = int64_t (*)();
+  friend class CycleClockSource;
+};
 
 class CycleClockSource {
  private:
@@ -87,6 +102,41 @@
   static void Register(CycleClockSourceFunc source);
 };
 
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+  // Optimize for the common case (no callback) by first doing a relaxed load;
+  // this is significantly faster on non-x86 platforms.
+  if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+    return nullptr;
+  }
+#endif  // !defined(__x86_64__)
+
+  // This corresponds to the store(std::memory_order_release) in
+  // CycleClockSource::Register, and makes sure that any updates made prior to
+  // registering the callback are visible to this thread before the callback
+  // is invoked.
+  return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32
+inline int64_t CycleClock::Now() {
+  auto fn = LoadCycleClockSource();
+  if (fn == nullptr) {
+    return base_internal::UnscaledCycleClock::Now() >> kShift;
+  }
+  return fn() >> kShift;
+}
+#endif
+
+inline double CycleClock::Frequency() {
+  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
 }  // namespace base_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
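Editor's note: a standalone sketch of the load pattern used by LoadCycleClockSource() above, with generic names. The relaxed fast-path check skips acquire ordering in the common no-callback case, while the acquire load pairs with the release store performed at registration.

#include <atomic>
#include <cstdint>

using Callback = int64_t (*)();
std::atomic<Callback> g_callback{nullptr};

void RegisterCallback(Callback cb) {
  g_callback.store(cb, std::memory_order_release);
}

Callback LoadCallback() {
  if (g_callback.load(std::memory_order_relaxed) == nullptr) {
    return nullptr;  // Fast path: no callback registered.
  }
  return g_callback.load(std::memory_order_acquire);
}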
diff --git a/abseil-cpp/absl/base/internal/cycleclock_config.h b/abseil-cpp/absl/base/internal/cycleclock_config.h
new file mode 100644
index 0000000..191112b
--- /dev/null
+++ b/abseil-cpp/absl/base/internal/cycleclock_config.h
@@ -0,0 +1,55 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/inline_variable.h"
+#include "absl/base/internal/unscaledcycleclock_config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency.  Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1);
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0);
+#endif
+#else   // NDEBUG
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2);
+#endif  // NDEBUG
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale,
+                               1.0 / (1 << kCycleClockShift));
+#endif  //  ABSL_USE_UNSCALED_CYCLECLOCK
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
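Editor's note: illustrative arithmetic for the shift/scale pair above (not Abseil code). Now() returns raw cycles shifted right by kCycleClockShift, and Frequency() is the raw frequency multiplied by kCycleClockFrequencyScale, so the two stay consistent when converting to seconds.

#include <cstdint>
#include "absl/base/internal/cycleclock.h"

double ElapsedSeconds(int64_t start, int64_t end) {
  // Both readings come from CycleClock::Now(), already scaled by kShift, so
  // dividing by CycleClock::Frequency() (scaled the same way) yields seconds.
  return static_cast<double>(end - start) /
         absl::base_internal::CycleClock::Frequency();
}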
diff --git a/abseil-cpp/absl/base/internal/direct_mmap.h b/abseil-cpp/absl/base/internal/direct_mmap.h
index 16accf0..1beb2ee 100644
--- a/abseil-cpp/absl/base/internal/direct_mmap.h
+++ b/abseil-cpp/absl/base/internal/direct_mmap.h
@@ -20,7 +20,7 @@
 
 #include "absl/base/config.h"
 
-#if ABSL_HAVE_MMAP
+#ifdef ABSL_HAVE_MMAP
 
 #include <sys/mman.h>
 
@@ -41,13 +41,13 @@
 
 #ifdef __mips__
 // Include definitions of the ABI currently in use.
-#ifdef __BIONIC__
+#if defined(__BIONIC__) || !defined(__GLIBC__)
 // Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
 // definitions we need.
 #include <asm/sgidefs.h>
 #else
 #include <sgidefs.h>
-#endif  // __BIONIC__
+#endif  // __BIONIC__ || !__GLIBC__
 #endif  // __mips__
 
 // SYS_mmap and SYS_munmap are not defined in Android.
@@ -72,12 +72,15 @@
 // Platform specific logic extracted from
 // https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
 inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
-                        off64_t offset) noexcept {
+                        off_t offset) noexcept {
 #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+    defined(__m68k__) || defined(__sh__) ||                                  \
+    (defined(__hppa__) && !defined(__LP64__)) ||                             \
     (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) ||                   \
     (defined(__PPC__) && !defined(__PPC64__)) ||                             \
     (defined(__riscv) && __riscv_xlen == 32) ||                              \
-    (defined(__s390__) && !defined(__s390x__))
+    (defined(__s390__) && !defined(__s390x__)) ||                            \
+    (defined(__sparc__) && !defined(__arch64__))
   // On these architectures, implement mmap with mmap2.
   static int pagesize = 0;
   if (pagesize == 0) {
@@ -94,11 +97,12 @@
 #ifdef __BIONIC__
   // SYS_mmap2 has problems on Android API level <= 16.
   // Workaround by invoking __mmap2() instead.
-  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+  return __mmap2(start, length, prot, flags, fd,
+                 static_cast<size_t>(offset / pagesize));
 #else
   return reinterpret_cast<void*>(
       syscall(SYS_mmap2, start, length, prot, flags, fd,
-              static_cast<off_t>(offset / pagesize)));
+              static_cast<unsigned long>(offset / pagesize)));  // NOLINT
 #endif
 #elif defined(__s390x__)
   // On s390x, mmap() arguments are passed in memory.
diff --git a/abseil-cpp/absl/base/internal/endian.h b/abseil-cpp/absl/base/internal/endian.h
index 9677530..50747d7 100644
--- a/abseil-cpp/absl/base/internal/endian.h
+++ b/abseil-cpp/absl/base/internal/endian.h
@@ -16,16 +16,10 @@
 #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
 #define ABSL_BASE_INTERNAL_ENDIAN_H_
 
-// The following guarantees declaration of the byte swap functions
-#ifdef _MSC_VER
-#include <stdlib.h>  // NOLINT(build/include)
-#elif defined(__FreeBSD__)
-#include <sys/endian.h>
-#elif defined(__GLIBC__)
-#include <byteswap.h>  // IWYU pragma: export
-#endif
-
 #include <cstdint>
+#include <cstdlib>
+
+#include "absl/base/casts.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/port.h"
@@ -33,47 +27,11 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-// Use compiler byte-swapping intrinsics if they are available.  32-bit
-// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
-// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
-// For simplicity, we enable them all only for GCC 4.8.0 or later.
-#if defined(__clang__) || \
-    (defined(__GNUC__) && \
-     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
 inline uint64_t gbswap_64(uint64_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
   return __builtin_bswap64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
-  return __builtin_bswap32(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
-  return __builtin_bswap16(host_int);
-}
-
 #elif defined(_MSC_VER)
-inline uint64_t gbswap_64(uint64_t host_int) {
   return _byteswap_uint64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
-  return _byteswap_ulong(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
-  return _byteswap_ushort(host_int);
-}
-
-#else
-inline uint64_t gbswap_64(uint64_t host_int) {
-#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
-  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
-  if (__builtin_constant_p(host_int)) {
-    return __bswap_constant_64(host_int);
-  } else {
-    uint64_t result;
-    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
-    return result;
-  }
-#elif defined(__GLIBC__)
-  return bswap_64(host_int);
 #else
   return (((host_int & uint64_t{0xFF}) << 56) |
           ((host_int & uint64_t{0xFF00}) << 40) |
@@ -83,12 +41,14 @@
           ((host_int & uint64_t{0xFF0000000000}) >> 24) |
           ((host_int & uint64_t{0xFF000000000000}) >> 40) |
           ((host_int & uint64_t{0xFF00000000000000}) >> 56));
-#endif  // bswap_64
+#endif
 }
 
 inline uint32_t gbswap_32(uint32_t host_int) {
-#if defined(__GLIBC__)
-  return bswap_32(host_int);
+#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
+  return __builtin_bswap32(host_int);
+#elif defined(_MSC_VER)
+  return _byteswap_ulong(host_int);
 #else
   return (((host_int & uint32_t{0xFF}) << 24) |
           ((host_int & uint32_t{0xFF00}) << 8) |
@@ -98,33 +58,29 @@
 }
 
 inline uint16_t gbswap_16(uint16_t host_int) {
-#if defined(__GLIBC__)
-  return bswap_16(host_int);
+#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
+  return __builtin_bswap16(host_int);
+#elif defined(_MSC_VER)
+  return _byteswap_ushort(host_int);
 #else
   return (((host_int & uint16_t{0xFF}) << 8) |
           ((host_int & uint16_t{0xFF00}) >> 8));
 #endif
 }
 
-#endif  // intrinsics available
-
 #ifdef ABSL_IS_LITTLE_ENDIAN
 
-// Definitions for ntohl etc. that don't require us to include
-// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
-// than just #defining them because in debug mode, gcc doesn't
-// correctly handle the (rather involved) definitions of bswap_32.
-// gcc guarantees that inline functions are as fast as macros, so
-// this isn't a performance hit.
+// Portable definitions for htonl (host-to-network) and friends on little-endian
+// architectures.
 inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
 inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
 inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
 
 #elif defined ABSL_IS_BIG_ENDIAN
 
-// These definitions are simpler on big-endian machines
-// These are functions instead of macros to avoid self-assignment warnings
-// on calls such as "i = ghtnol(i);".  This also provides type checking.
+// Portable definitions for htonl (host-to-network) etc on big-endian
+// architectures. These definitions are simpler since the host byte order is the
+// same as network byte order.
 inline uint16_t ghtons(uint16_t x) { return x; }
 inline uint32_t ghtonl(uint32_t x) { return x; }
 inline uint64_t ghtonll(uint64_t x) { return x; }
@@ -173,6 +129,36 @@
 
 #endif /* ENDIAN */
 
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
 // Functions to do unaligned loads and stores in little-endian order.
 inline uint16_t Load16(const void *p) {
   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
@@ -233,6 +219,36 @@
 
 #endif /* ENDIAN */
 
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
 // Functions to do unaligned loads and stores in big-endian order.
 inline uint16_t Load16(const void *p) {
   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
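Editor's note: a short usage sketch for the FromHost/ToHost overload sets added above (values are illustrative). Overload resolution picks the width from the argument type, and a round trip restores the original value on any host.

#include <cstdint>
#include "absl/base/internal/endian.h"

void RoundTrip() {
  int32_t v = -123456;
  int32_t wire = absl::little_endian::FromHost(v);   // int32_t overload
  int32_t back = absl::little_endian::ToHost(wire);  // back == v
  (void)back;
}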
diff --git a/abseil-cpp/absl/base/internal/exception_safety_testing.h b/abseil-cpp/absl/base/internal/exception_safety_testing.h
index 6ba89d0..c106154 100644
--- a/abseil-cpp/absl/base/internal/exception_safety_testing.h
+++ b/abseil-cpp/absl/base/internal/exception_safety_testing.h
@@ -536,7 +536,22 @@
   }
 
   // Memory management operators
-  // Args.. allows us to overload regular and placement new in one shot
+  static void* operator new(size_t s) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new(s);
+  }
+
+  static void* operator new[](size_t s) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new[](s);
+  }
+
   template <typename... Args>
   static void* operator new(size_t s, Args&&... args) noexcept(
       IsSpecified(TypeSpec::kNoThrowNew)) {
@@ -557,12 +572,6 @@
 
   // Abseil doesn't support throwing overloaded operator delete.  These are
   // provided so a throwing operator-new can clean up after itself.
-  //
-  // We provide both regular and templated operator delete because if only the
-  // templated version is provided as we did with operator new, the compiler has
-  // no way of knowing which overload of operator delete to call. See
-  // https://en.cppreference.com/w/cpp/memory/new/operator_delete and
-  // https://en.cppreference.com/w/cpp/language/delete for the gory details.
   void operator delete(void* p) noexcept { ::operator delete(p); }
 
   template <typename... Args>
@@ -726,9 +735,8 @@
 
   ThrowingAllocator select_on_container_copy_construction() noexcept(
       IsSpecified(AllocSpec::kNoThrowAllocate)) {
-    auto& out = *this;
     ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
-    return out;
+    return *this;
   }
 
   template <typename U>
@@ -938,7 +946,7 @@
  *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
  *   It is used for reliably creating identical T instances to test on.
  *
- * - Operation: The operation object (passsed in via tester.WithOperation(...)
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
  *   or tester.Test(...)) must be invocable with the signature
  *   `void operator()(T*) const` where T is the type being tested. It is used
  *   for performing steps on a T instance that may throw and that need to be
diff --git a/abseil-cpp/absl/base/internal/fast_type_id.h b/abseil-cpp/absl/base/internal/fast_type_id.h
index 3db59e8..a547b3a 100644
--- a/abseil-cpp/absl/base/internal/fast_type_id.h
+++ b/abseil-cpp/absl/base/internal/fast_type_id.h
@@ -28,8 +28,10 @@
   constexpr static char dummy_var = 0;
 };
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 template <typename Type>
 constexpr char FastTypeTag<Type>::dummy_var;
+#endif
 
 // FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
 // passed-in type. These are meant to be good match for keys into maps or
diff --git a/abseil-cpp/absl/base/internal/inline_variable.h b/abseil-cpp/absl/base/internal/inline_variable.h
index 130d8c2..df933fa 100644
--- a/abseil-cpp/absl/base/internal/inline_variable.h
+++ b/abseil-cpp/absl/base/internal/inline_variable.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
-#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
 
 #include <type_traits>
 
@@ -104,4 +104,4 @@
 
 #endif  // __cpp_inline_variables
 
-#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
diff --git a/abseil-cpp/absl/base/internal/inline_variable_testing.h b/abseil-cpp/absl/base/internal/inline_variable_testing.h
index 3856b9f..f3c8145 100644
--- a/abseil-cpp/absl/base/internal/inline_variable_testing.h
+++ b/abseil-cpp/absl/base/internal/inline_variable_testing.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
-#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
 
 #include "absl/base/internal/inline_variable.h"
 
@@ -43,4 +43,4 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
diff --git a/abseil-cpp/absl/base/internal/invoke.h b/abseil-cpp/absl/base/internal/invoke.h
index 5c71f32..643c2a4 100644
--- a/abseil-cpp/absl/base/internal/invoke.h
+++ b/abseil-cpp/absl/base/internal/invoke.h
@@ -14,6 +14,8 @@
 //
 // absl::base_internal::invoke(f, args...) is an implementation of
 // INVOKE(f, args...) from section [func.require] of the C++ standard.
+// When compiled as C++17 and later versions, it is implemented as an alias of
+// std::invoke.
 //
 // [func.require]
 // Define INVOKE (f, t1, t2, ..., tN) as follows:
@@ -35,6 +37,26 @@
 #ifndef ABSL_BASE_INTERNAL_INVOKE_H_
 #define ABSL_BASE_INTERNAL_INVOKE_H_
 
+#include "absl/base/config.h"
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
+#include <functional>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using std::invoke;
+using std::invoke_result_t;
+using std::is_invocable_r;
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#else  // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
 #include <algorithm>
 #include <type_traits>
 #include <utility>
@@ -80,8 +102,18 @@
   static decltype((std::declval<Obj>().*
                    std::declval<MemFun>())(std::declval<Args>()...))
   Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+// Ignore bogus GCC warnings on this line.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for a similar example.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
     return (std::forward<Obj>(obj).*
             std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic pop
+#endif
   }
 };
 
@@ -180,8 +212,30 @@
   return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
                                            std::forward<Args>(args)...);
 }
+
+template <typename AlwaysVoid, typename, typename, typename...>
+struct IsInvocableRImpl : std::false_type {};
+
+template <typename R, typename F, typename... Args>
+struct IsInvocableRImpl<
+    absl::void_t<absl::base_internal::invoke_result_t<F, Args...> >, R, F,
+    Args...>
+    : std::integral_constant<
+          bool,
+          std::is_convertible<absl::base_internal::invoke_result_t<F, Args...>,
+                              R>::value ||
+              std::is_void<R>::value> {};
+
+// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
+// and either the return type is convertible to `R`, or `R` is void.
+// C++11-compatible version of `std::is_invocable_r`.
+template <typename R, typename F, typename... Args>
+using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;
+
 }  // namespace base_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
 #endif  // ABSL_BASE_INTERNAL_INVOKE_H_
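A short sketch (not part of the patch) exercising the shims touched above; under C++17 they are plain aliases of the std:: entities, and before that they fall back to the hand-rolled implementations in this header.

    #include <cstdio>

    #include "absl/base/internal/invoke.h"

    struct Widget {
      int size() const { return 42; }
    };

    int main() {
      Widget w;
      // INVOKE on a pointer to member function, per [func.require].
      int n = absl::base_internal::invoke(&Widget::size, w);

      // C++11-compatible equivalent of std::is_invocable_r.
      static_assert(absl::base_internal::is_invocable_r<
                        long, decltype(&Widget::size), Widget&>::value,
                    "an int result is convertible to long");

      std::printf("%d\n", n);  // 42
      return 0;
    }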
diff --git a/abseil-cpp/absl/base/internal/low_level_alloc.cc b/abseil-cpp/absl/base/internal/low_level_alloc.cc
index 229ab91..6d2cfea 100644
--- a/abseil-cpp/absl/base/internal/low_level_alloc.cc
+++ b/abseil-cpp/absl/base/internal/low_level_alloc.cc
@@ -42,25 +42,25 @@
 #include <windows.h>
 #endif
 
+#ifdef __linux__
+#include <sys/prctl.h>
+#endif
+
 #include <string.h>
+
 #include <algorithm>
 #include <atomic>
 #include <cerrno>
 #include <cstddef>
-#include <new>                   // for placement-new
+#include <new>  // for placement-new
 
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
 
-// MAP_ANONYMOUS
-#if defined(__APPLE__)
-// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
-// deprecated. In Darwin, MAP_ANON is all there is.
-#if !defined MAP_ANONYMOUS
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
 #define MAP_ANONYMOUS MAP_ANON
-#endif  // !MAP_ANONYMOUS
-#endif  // __APPLE__
+#endif
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -122,7 +122,7 @@
 static int Random(uint32_t *state) {
   uint32_t r = *state;
   int result = 1;
-  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
     result++;
   }
   *state = r;
@@ -144,7 +144,7 @@
   size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
   int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
   if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
-  if (level > kMaxLevel-1) level = kMaxLevel - 1;
+  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
   ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
   return level;
 }
@@ -153,8 +153,8 @@
 // For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
 // points to the last element at level i in the AllocList less than *e, or is
 // head if no such element exists.
-static AllocList *LLA_SkiplistSearch(AllocList *head,
-                                     AllocList *e, AllocList **prev) {
+static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
+                                     AllocList **prev) {
   AllocList *p = head;
   for (int level = head->levels - 1; level >= 0; level--) {
     for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
@@ -190,7 +190,7 @@
     prev[i]->next[i] = e->next[i];
   }
   while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
-    head->levels--;   // reduce head->levels if level unused
+    head->levels--;  // reduce head->levels if level unused
   }
 }
 
@@ -249,9 +249,9 @@
 
 // Returns a global arena that does not call into hooks.  Used by NewArena()
 // when kCallMallocHook is not set.
-LowLevelAlloc::Arena* UnhookedArena() {
+LowLevelAlloc::Arena *UnhookedArena() {
   base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
-  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
 }
 
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
@@ -269,7 +269,7 @@
 // Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
   base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
-  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
 }
 
 // magic numbers to identify allocated and unallocated blocks
@@ -332,7 +332,7 @@
 #elif defined(__wasm__) || defined(__asmjs__)
   return getpagesize();
 #else
-  return sysconf(_SC_PAGESIZE);
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
 #endif
 }
 
@@ -356,15 +356,14 @@
       min_size(2 * round_up),
       random(0) {
   freelist.header.size = 0;
-  freelist.header.magic =
-      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
   freelist.header.arena = this;
   freelist.levels = 0;
   memset(freelist.next, 0, sizeof(freelist.next));
 }
 
 // L < meta_data_arena->mu
-LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
   Arena *meta_data_arena = DefaultArena();
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
   if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
@@ -375,7 +374,7 @@
     meta_data_arena = UnhookedArena();
   }
   Arena *result =
-    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
   return result;
 }
 
@@ -480,8 +479,8 @@
     AllocList *prev[kMaxLevel];
     LLA_SkiplistDelete(&arena->freelist, n, prev);
     LLA_SkiplistDelete(&arena->freelist, a, prev);
-    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
-                                   &arena->random);
+    a->levels =
+        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
     LLA_SkiplistInsert(&arena->freelist, a, prev);
   }
 }
@@ -489,27 +488,27 @@
 // Adds block at location "v" to the free list
 // L >= arena->mu
 static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
-  AllocList *f = reinterpret_cast<AllocList *>(
-                        reinterpret_cast<char *>(v) - sizeof (f->header));
+  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                               sizeof(f->header));
   ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                  "bad magic number in AddToFreelist()");
   ABSL_RAW_CHECK(f->header.arena == arena,
                  "bad arena pointer in AddToFreelist()");
-  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
-                                 &arena->random);
+  f->levels =
+      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
   AllocList *prev[kMaxLevel];
   LLA_SkiplistInsert(&arena->freelist, f, prev);
   f->header.magic = Magic(kMagicUnallocated, &f->header);
-  Coalesce(f);                  // maybe coalesce with successor
-  Coalesce(prev[0]);            // maybe coalesce with predecessor
+  Coalesce(f);        // maybe coalesce with successor
+  Coalesce(prev[0]);  // maybe coalesce with predecessor
 }
 
 // Frees storage allocated by LowLevelAlloc::Alloc().
 // L < arena->mu
 void LowLevelAlloc::Free(void *v) {
   if (v != nullptr) {
-    AllocList *f = reinterpret_cast<AllocList *>(
-                        reinterpret_cast<char *>(v) - sizeof (f->header));
+    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                                 sizeof(f->header));
     LowLevelAlloc::Arena *arena = f->header.arena;
     ArenaLock section(arena);
     AddToFreelist(v, arena);
@@ -524,21 +523,21 @@
 static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
   void *result = nullptr;
   if (request != 0) {
-    AllocList *s;       // will point to region that satisfies request
+    AllocList *s;  // will point to region that satisfies request
     ArenaLock section(arena);
     // round up with header
-    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
-                             arena->round_up);
-    for (;;) {      // loop until we find a suitable region
+    size_t req_rnd =
+        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
+    for (;;) {  // loop until we find a suitable region
       // find the minimum levels that a block of this size must have
       int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
-      if (i < arena->freelist.levels) {   // potential blocks exist
+      if (i < arena->freelist.levels) {        // potential blocks exist
         AllocList *before = &arena->freelist;  // predecessor of s
         while ((s = Next(i, before, arena)) != nullptr &&
                s->header.size < req_rnd) {
           before = s;
         }
-        if (s != nullptr) {       // we found a region
+        if (s != nullptr) {  // we found a region
           break;
         }
       }
@@ -550,7 +549,7 @@
       size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
       void *new_pages;
 #ifdef _WIN32
-      new_pages = VirtualAlloc(0, new_pages_size,
+      new_pages = VirtualAlloc(nullptr, new_pages_size,
                                MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
       ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
 #else
@@ -570,6 +569,18 @@
         ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
       }
 
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+      // Attempt to name the allocated address range in /proc/$PID/smaps on
+      // Linux.
+      //
+      // This invocation of prctl() may fail if the Linux kernel was not
+      // configured with the CONFIG_ANON_VMA_NAME option.  This is OK since
+      // the naming of arenas is primarily a debugging aid.
+      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
+            "absl");
+#endif
+#endif  // __linux__
 #endif  // _WIN32
       arena->mu.Lock();
       s = reinterpret_cast<AllocList *>(new_pages);
@@ -580,12 +591,12 @@
       AddToFreelist(&s->levels, arena);  // insert new region into free list
     }
     AllocList *prev[kMaxLevel];
-    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
+    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
     // s points to the first free region that's big enough
     if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
       // big enough to split
-      AllocList *n = reinterpret_cast<AllocList *>
-                        (req_rnd + reinterpret_cast<char *>(s));
+      AllocList *n =
+          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
       n->header.size = s->header.size - req_rnd;
       n->header.magic = Magic(kMagicAllocated, &n->header);
       n->header.arena = arena;
diff --git a/abseil-cpp/absl/base/internal/low_level_alloc.h b/abseil-cpp/absl/base/internal/low_level_alloc.h
index db91951..c2f1f25 100644
--- a/abseil-cpp/absl/base/internal/low_level_alloc.h
+++ b/abseil-cpp/absl/base/internal/low_level_alloc.h
@@ -46,7 +46,8 @@
 // for more information.
 #ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 #error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
-#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \
+    defined(__hexagon__)
 #define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
 #endif
 
@@ -103,7 +104,7 @@
   // the provided flags.  For example, the call NewArena(kAsyncSignalSafe)
   // is itself async-signal-safe, as well as generating an arena that provides
   // async-signal-safe Alloc/Free.
-  static Arena *NewArena(int32_t flags);
+  static Arena *NewArena(uint32_t flags);
 
   // Destroys an arena allocated by NewArena and returns true,
   // provided no allocated blocks remain in the arena.
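To make the flag-type change above concrete, an illustrative arena lifecycle (not part of the patch). LowLevelAlloc is an Abseil-internal API; the guard macro and kAsyncSignalSafe flag come from the comments in this hunk, while DeleteArena is assumed from the rest of this header.

    #include <cstring>

    #include "absl/base/internal/low_level_alloc.h"

    #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    void ArenaDemo() {
      using absl::base_internal::LowLevelAlloc;

      // An arena whose Alloc/Free are async-signal-safe (flags are uint32_t
      // after this change).
      LowLevelAlloc::Arena* arena =
          LowLevelAlloc::NewArena(LowLevelAlloc::kAsyncSignalSafe);

      void* block = LowLevelAlloc::AllocWithArena(128, arena);
      std::memset(block, 0, 128);
      LowLevelAlloc::Free(block);  // the owning arena is found via the header

      // Returns true only once every block in the arena has been freed.
      LowLevelAlloc::DeleteArena(arena);
    }
    #endif  // !ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING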
diff --git a/abseil-cpp/absl/base/internal/low_level_alloc_test.cc b/abseil-cpp/absl/base/internal/low_level_alloc_test.cc
index 2f2eaff..8fdec09 100644
--- a/abseil-cpp/absl/base/internal/low_level_alloc_test.cc
+++ b/abseil-cpp/absl/base/internal/low_level_alloc_test.cc
@@ -21,6 +21,10 @@
 #include <unordered_map>
 #include <utility>
 
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
 #include "absl/container/node_hash_map.h"
 
 namespace absl {
@@ -82,7 +86,7 @@
   AllocMap::iterator it;
   BlockDesc block_desc;
   int rnd;
-  LowLevelAlloc::Arena *arena = 0;
+  LowLevelAlloc::Arena *arena = nullptr;
   if (use_new_arena) {
     int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
     arena = LowLevelAlloc::NewArena(flags);
@@ -97,11 +101,10 @@
     case 0:     // coin came up heads: add a block
       using_low_level_alloc = true;
       block_desc.len = rand() & 0x3fff;
-      block_desc.ptr =
-        reinterpret_cast<char *>(
-                        arena == 0
-                        ? LowLevelAlloc::Alloc(block_desc.len)
-                        : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+      block_desc.ptr = reinterpret_cast<char *>(
+          arena == nullptr
+              ? LowLevelAlloc::Alloc(block_desc.len)
+              : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
       using_low_level_alloc = false;
       RandomizeBlockDesc(&block_desc);
       rnd = rand();
@@ -158,5 +161,20 @@
 int main(int argc, char *argv[]) {
   // The actual test runs in the global constructor of `before_main`.
   printf("PASS\n");
+#ifdef __EMSCRIPTEN__
+  // clang-format off
+// This is JS here. Don't try to format it.
+    MAIN_THREAD_EM_ASM({
+      if (ENVIRONMENT_IS_WEB) {
+        if (typeof TEST_FINISH === 'function') {
+          TEST_FINISH($0);
+        } else {
+          console.error('Attempted to exit with status ' + $0);
+          console.error('But TEST_FINISH is not a function.');
+        }
+      }
+    }, 0);
+// clang-format on
+#endif
   return 0;
 }
diff --git a/abseil-cpp/absl/base/internal/low_level_scheduling.h b/abseil-cpp/absl/base/internal/low_level_scheduling.h
index 6ef79fb..9baccc0 100644
--- a/abseil-cpp/absl/base/internal/low_level_scheduling.h
+++ b/abseil-cpp/absl/base/internal/low_level_scheduling.h
@@ -61,6 +61,8 @@
  public:
   // Returns true iff the calling thread may be cooperatively rescheduled.
   static bool ReschedulingIsAllowed();
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
 
  private:
   // Disable cooperative rescheduling of the calling thread.  It may still
@@ -101,9 +103,6 @@
   friend class SchedulingHelper;
   friend class SpinLock;
   friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
-
-  SchedulingGuard(const SchedulingGuard&) = delete;
-  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
 };
 
 //------------------------------------------------------------------------------
diff --git a/abseil-cpp/absl/base/internal/nullability_impl.h b/abseil-cpp/absl/base/internal/nullability_impl.h
new file mode 100644
index 0000000..36e1b33
--- /dev/null
+++ b/abseil-cpp/absl/base/internal/nullability_impl.h
@@ -0,0 +1,106 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+
+#include <memory>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+namespace nullability_internal {
+
+// `IsNullabilityCompatible` checks whether its first argument is a class
+// explicitly tagged as supporting nullability annotations. The tag is the type
+// declaration `absl_nullability_compatible`.
+template <typename, typename = void>
+struct IsNullabilityCompatible : std::false_type {};
+
+template <typename T>
+struct IsNullabilityCompatible<
+    T, absl::void_t<typename T::absl_nullability_compatible>> : std::true_type {
+};
+
+template <typename T>
+constexpr bool IsSupportedType = IsNullabilityCompatible<T>::value;
+
+template <typename T>
+constexpr bool IsSupportedType<T*> = true;
+
+template <typename T, typename U>
+constexpr bool IsSupportedType<T U::*> = true;
+
+template <typename T, typename... Deleter>
+constexpr bool IsSupportedType<std::unique_ptr<T, Deleter...>> = true;
+
+template <typename T>
+constexpr bool IsSupportedType<std::shared_ptr<T>> = true;
+
+template <typename T>
+struct EnableNullable {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNonnull {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNullabilityUnknown {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+// Note: we do not apply Clang nullability attributes (e.g. _Nullable).  These
+// only support raw pointers, and conditionally enabling them only for raw
+// pointers inhibits template arg deduction.  Ideally, they would support all
+// pointer-like types.
+template <typename T, typename = typename EnableNullable<T>::type>
+using NullableImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullable")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNonnull<T>::type>
+using NonnullImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nonnull")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNullabilityUnknown<T>::type>
+using NullabilityUnknownImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullability_Unspecified")]]
+#endif
+    = T;
+
+}  // namespace nullability_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
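A hedged usage sketch (not part of the patch): absl::Nonnull and absl::Nullable are assumed to be the public wrappers in absl/base/nullability.h over the *Impl aliases defined above. As the comment above notes, the annotations are documentation plus optional clang::annotate metadata and do not change code generation.

    #include "absl/base/nullability.h"

    // Never null: callers must pass a valid pointer.
    int Deref(absl::Nonnull<const int*> p) { return *p; }

    // May be null: callees should check before dereferencing.
    int DerefOrZero(absl::Nullable<const int*> p) {
      return p != nullptr ? *p : 0;
    }

    int main() {
      int x = 7;
      return Deref(&x) + DerefOrZero(nullptr);  // 7 + 0
    }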
diff --git a/abseil-cpp/absl/base/internal/prefetch.h b/abseil-cpp/absl/base/internal/prefetch.h
new file mode 100644
index 0000000..aecfd87
--- /dev/null
+++ b/abseil-cpp/absl/base/internal/prefetch.h
@@ -0,0 +1,137 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// TODO(b/265984188): remove all uses and delete this header.
+
+#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_
+#define ABSL_BASE_INTERNAL_PREFETCH_H_
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/prefetch.h"
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE)
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+// Compatibility wrappers around __builtin_prefetch, to prefetch data
+// for read if supported by the toolchain.
+
+// Move data into the cache before it is read, or "prefetch" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// The function names specify the temporal locality heuristic applied,
+// using the names of Intel prefetch instructions:
+//
+//   T0 - high degree of temporal locality; data should be left in as
+//        many levels of the cache possible
+//   T1 - moderate degree of temporal locality
+//   T2 - low degree of temporal locality
+//   Nta - no temporal locality, data need not be left in the cache
+//         after the read
+//
+// Incorrect or gratuitous use of these functions can degrade
+// performance, so use them only when representative benchmarks show
+// an improvement.
+//
+// Example usage:
+//
+//   absl::base_internal::PrefetchT0(addr);
+//
+// Currently, the different prefetch calls behave on some Intel
+// architectures as follows:
+//
+//                 SNB..SKL   SKX
+// PrefetchT0()   L1/L2/L3  L1/L2
+// PrefetchT1()      L2/L3     L2
+// PrefetchT2()      L2/L3     L2
+// PrefetchNta()  L1/--/L3  L1*
+//
+// * On SKX PrefetchNta() will bring the line into L1 but will evict
+//   from L3 cache. This might result in surprising behavior.
+//
+// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon.
+//
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead")
+inline void PrefetchT0(const void* address) {
+  absl::PrefetchToLocalCache(address);
+}
+
+ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead")
+inline void PrefetchNta(const void* address) {
+  absl::PrefetchToLocalCacheNta(address);
+}
+
+ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead")
+void PrefetchT1(const void* addr);
+
+ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead")
+void PrefetchT2(const void* addr);
+
+// Implementation details follow.
+
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+// These functions speculatively load for read only. This is
+// safe for all currently supported platforms. However, prefetch for
+// store may have problems depending on the target platform.
+//
+inline void PrefetchT1(const void* addr) {
+  // Note: this uses prefetcht1 on Intel.
+  __builtin_prefetch(addr, 0, 2);
+}
+inline void PrefetchT2(const void* addr) {
+  // Note: this uses prefetcht2 on Intel.
+  __builtin_prefetch(addr, 0, 1);
+}
+
+#elif defined(ABSL_INTERNAL_HAVE_SSE)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+inline void PrefetchT1(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
+}
+inline void PrefetchT2(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
+}
+
+#else
+inline void PrefetchT1(const void*) {}
+inline void PrefetchT2(const void*) {}
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_PREFETCH_H_
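Since this header is now only a deprecation shim, a short sketch (not part of the patch) of the replacement usage it points to; absl::PrefetchToLocalCache comes from absl/base/prefetch.h, which the wrappers above forward to.

    #include <cstddef>
    #include <cstdint>

    #include "absl/base/prefetch.h"

    int64_t Sum(const int64_t* data, size_t n) {
      int64_t total = 0;
      for (size_t i = 0; i < n; ++i) {
        // Hint the element we will need a few iterations from now; this is a
        // no-op on targets without prefetch support.  As the comment above
        // warns, benchmark before relying on it.
        if (i + 8 < n) absl::PrefetchToLocalCache(data + i + 8);
        total += data[i];
      }
      return total;
    }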
diff --git a/abseil-cpp/absl/base/internal/prefetch_test.cc b/abseil-cpp/absl/base/internal/prefetch_test.cc
new file mode 100644
index 0000000..7c1dae4
--- /dev/null
+++ b/abseil-cpp/absl/base/internal/prefetch_test.cc
@@ -0,0 +1,43 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/prefetch.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+int number = 42;
+
+TEST(Prefetch, TemporalLocalityNone) {
+  absl::base_internal::PrefetchNta(&number);
+  EXPECT_EQ(number, 42);
+}
+
+TEST(Prefetch, TemporalLocalityLow) {
+  absl::base_internal::PrefetchT2(&number);
+  EXPECT_EQ(number, 42);
+}
+
+TEST(Prefetch, TemporalLocalityMedium) {
+  absl::base_internal::PrefetchT1(&number);
+  EXPECT_EQ(number, 42);
+}
+
+TEST(Prefetch, TemporalLocalityHigh) {
+  absl::base_internal::PrefetchT0(&number);
+  EXPECT_EQ(number, 42);
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/base/internal/raw_logging.cc b/abseil-cpp/absl/base/internal/raw_logging.cc
index ae8754c..4c922cc 100644
--- a/abseil-cpp/absl/base/internal/raw_logging.cc
+++ b/abseil-cpp/absl/base/internal/raw_logging.cc
@@ -14,15 +14,21 @@
 
 #include "absl/base/internal/raw_logging.h"
 
-#include <stddef.h>
 #include <cstdarg>
+#include <cstddef>
 #include <cstdio>
 #include <cstdlib>
 #include <cstring>
+#include <string>
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten/console.h>
+#endif
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/errno_saver.h"
 #include "absl/base/log_severity.h"
 
 // We know how to perform low-level writes to stderr in POSIX and Windows.  For
@@ -36,8 +42,8 @@
 // This preprocessor token is also defined in raw_io.cc.  If you need to copy
 // this, consider moving both to config.h instead.
 #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
-    defined(__Fuchsia__) || defined(__native_client__) || \
-    defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+    defined(__Fuchsia__) || defined(__native_client__) ||               \
+    defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__)
 
 #include <unistd.h>
 
@@ -50,7 +56,8 @@
 // ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
 //   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
 // for low level operations that want to avoid libc.
-#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \
+    !defined(__ANDROID__)
 #include <sys/syscall.h>
 #define ABSL_HAVE_SYSCALL_WRITE 1
 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
@@ -67,36 +74,35 @@
 #undef ABSL_HAVE_RAW_IO
 #endif
 
-// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
-// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
-// selected set of platforms for which we expect not to be able to raw log.
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_log_internal {
+namespace {
 
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
-    absl::raw_logging_internal::LogPrefixHook>
-    log_prefix_hook;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
-    absl::raw_logging_internal::AbortHook>
-    abort_hook;
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
 
 #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
-static const char kTruncated[] = " ... (message truncated)\n";
+constexpr char kTruncated[] = " ... (message truncated)\n";
 
 // sprintf the format to the buffer, adjusting *buf and *size to reflect the
 // consumed bytes, and return whether the message fit without truncation.  If
 // truncation occurred, if possible leave room in the buffer for the message
 // kTruncated[].
-inline static bool VADoRawLog(char** buf, int* size, const char* format,
-                              va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
-inline static bool VADoRawLog(char** buf, int* size,
-                              const char* format, va_list ap) {
-  int n = vsnprintf(*buf, *size, format, ap);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+    ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
+  if (*size < 0)
+    return false;
+  int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
   bool result = true;
   if (n < 0 || n > *size) {
     result = false;
     if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
-      n = *size - sizeof(kTruncated);  // room for truncation message
+      n = *size - static_cast<int>(sizeof(kTruncated));
     } else {
-      n = 0;                           // no room for truncation message
+      n = 0;  // no room for truncation message
     }
   }
   *size -= n;
@@ -105,9 +111,7 @@
 }
 #endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
 
-static constexpr int kLogBufSize = 3000;
-
-namespace {
+constexpr int kLogBufSize = 3000;
 
 // CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
 // that invoke malloc() and getenv() that might acquire some locks.
@@ -118,9 +122,11 @@
 bool DoRawLog(char** buf, int* size, const char* format, ...)
     ABSL_PRINTF_ATTRIBUTE(3, 4);
 bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  if (*size < 0)
+    return false;
   va_list ap;
   va_start(ap, format);
-  int n = vsnprintf(*buf, *size, format, ap);
+  int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
   va_end(ap);
   if (n < 0 || n > *size) return false;
   *size -= n;
@@ -128,6 +134,18 @@
   return true;
 }
 
+bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line,
+                               char** buf, int* buf_size) {
+  DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line);
+  return true;
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<LogFilterAndPrefixHook>
+    log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<AbortHook> abort_hook;
+
 void RawLogVA(absl::LogSeverity severity, const char* file, int line,
               const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
 void RawLogVA(absl::LogSeverity severity, const char* file, int line,
@@ -148,14 +166,7 @@
   }
 #endif
 
-  auto log_prefix_hook_ptr = log_prefix_hook.Load();
-  if (log_prefix_hook_ptr) {
-    enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
-  } else {
-    if (enabled) {
-      DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
-    }
-  }
+  enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);
   const char* const prefix_end = buf;
 
 #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
@@ -166,11 +177,12 @@
     } else {
       DoRawLog(&buf, &size, "%s", kTruncated);
     }
-    absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+    AsyncSignalSafeWriteError(buffer, strlen(buffer));
   }
 #else
   static_cast<void>(format);
   static_cast<void>(ap);
+  static_cast<void>(enabled);
 #endif
 
   // Abort the process after logging a FATAL message, even if the output itself
@@ -181,18 +193,53 @@
   }
 }
 
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
+                        const std::string& message) {
+  RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+         message.data());
+}
+
 }  // namespace
 
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
-void SafeWriteToStderr(const char *s, size_t len) {
-#if defined(ABSL_HAVE_SYSCALL_WRITE)
+void AsyncSignalSafeWriteError(const char* s, size_t len) {
+  if (!len) return;
+  absl::base_internal::ErrnoSaver errno_saver;
+#if defined(__EMSCRIPTEN__)
+  // In WebAssembly, bypass filesystem emulation via fwrite.
+  if (s[len - 1] == '\n') {
+    // Skip a trailing newline character as emscripten_errn adds one itself.
+    len--;
+  }
+  // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+  // until 3.1.43.
+#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+  emscripten_errn(s, len);
+#else
+  char buf[kLogBufSize];
+  if (len >= kLogBufSize) {
+    len = kLogBufSize - 1;
+    constexpr size_t trunc_len = sizeof(kTruncated) - 2;
+    memcpy(buf + len - trunc_len, kTruncated, trunc_len);
+    buf[len] = '\0';
+    len -= trunc_len;
+  } else {
+    buf[len] = '\0';
+  }
+  memcpy(buf, s, len);
+  _emscripten_err(buf);
+#endif
+#elif defined(ABSL_HAVE_SYSCALL_WRITE)
+  // We prefer calling write via `syscall` to minimize the risk of libc doing
+  // something "helpful".
   syscall(SYS_write, STDERR_FILENO, s, len);
 #elif defined(ABSL_HAVE_POSIX_WRITE)
   write(STDERR_FILENO, s, len);
 #elif defined(ABSL_HAVE_RAW_IO)
-  _write(/* stderr */ 2, s, len);
+  _write(/* stderr */ 2, s, static_cast<unsigned>(len));
 #else
   // stderr logging unsupported on this platform
   (void) s;
@@ -201,8 +248,6 @@
 }
 
 void RawLog(absl::LogSeverity severity, const char* file, int line,
-            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
-void RawLog(absl::LogSeverity severity, const char* file, int line,
             const char* format, ...) {
   va_list ap;
   va_start(ap, format);
@@ -210,15 +255,6 @@
   va_end(ap);
 }
 
-// Non-formatting version of RawLog().
-//
-// TODO(gfalcon): When string_view no longer depends on base, change this
-// interface to take its message as a string_view instead.
-static void DefaultInternalLog(absl::LogSeverity severity, const char* file,
-                               int line, const std::string& message) {
-  RawLog(severity, file, line, "%s", message.c_str());
-}
-
 bool RawLoggingFullySupported() {
 #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
   return true;
@@ -231,10 +267,16 @@
     absl::base_internal::AtomicHook<InternalLogFunction>
         internal_log_function(DefaultInternalLog);
 
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {
+  log_filter_and_prefix_hook.Store(func);
+}
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
 void RegisterInternalLogFunction(InternalLogFunction func) {
   internal_log_function.Store(func);
 }
 
-}  // namespace raw_logging_internal
+}  // namespace raw_log_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/base/internal/raw_logging.h b/abseil-cpp/absl/base/internal/raw_logging.h
index 2508f3c..b79550b 100644
--- a/abseil-cpp/absl/base/internal/raw_logging.h
+++ b/abseil-cpp/absl/base/internal/raw_logging.h
@@ -43,12 +43,12 @@
 
 #define ABSL_RAW_LOG(severity, ...)                                            \
   do {                                                                         \
-    constexpr const char* absl_raw_logging_internal_basename =                 \
-        ::absl::raw_logging_internal::Basename(__FILE__,                       \
-                                               sizeof(__FILE__) - 1);          \
-    ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
-                                         absl_raw_logging_internal_basename,   \
-                                         __LINE__, __VA_ARGS__);               \
+    constexpr const char* absl_raw_log_internal_basename =                     \
+        ::absl::raw_log_internal::Basename(__FILE__, sizeof(__FILE__) - 1);    \
+    ::absl::raw_log_internal::RawLog(ABSL_RAW_LOG_INTERNAL_##severity,         \
+                                     absl_raw_log_internal_basename, __LINE__, \
+                                     __VA_ARGS__);                             \
+    ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity;                        \
   } while (0)
 
 // Similar to CHECK(condition) << message, but for low-level modules:
@@ -72,12 +72,13 @@
 //
 // The API is a subset of the above: each macro only takes two arguments.  Use
 // StrCat if you need to build a richer message.
-#define ABSL_INTERNAL_LOG(severity, message)                             \
-  do {                                                                   \
-    constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
-    ::absl::raw_logging_internal::internal_log_function(                 \
-        ABSL_RAW_LOGGING_INTERNAL_##severity,                            \
-        absl_raw_logging_internal_filename, __LINE__, message);          \
+#define ABSL_INTERNAL_LOG(severity, message)                              \
+  do {                                                                    \
+    constexpr const char* absl_raw_log_internal_filename = __FILE__;      \
+    ::absl::raw_log_internal::internal_log_function(                      \
+        ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, \
+        __LINE__, message);                                               \
+    ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity;                   \
   } while (0)
 
 #define ABSL_INTERNAL_CHECK(condition, message)                    \
@@ -89,16 +90,36 @@
     }                                                              \
   } while (0)
 
-#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
-#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
-#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
-#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
-#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+#ifndef NDEBUG
+
+#define ABSL_RAW_DLOG(severity, ...) ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define ABSL_RAW_DCHECK(condition, message) ABSL_RAW_CHECK(condition, message)
+
+#else  // NDEBUG
+
+#define ABSL_RAW_DLOG(severity, ...)                   \
+  while (false) ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define ABSL_RAW_DCHECK(condition, message) \
+  while (false) ABSL_RAW_CHECK(condition, message)
+
+#endif  // NDEBUG
+
+#define ABSL_RAW_LOG_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOG_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \
   ::absl::NormalizeLogSeverity(severity)
 
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_INFO
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL ABSL_UNREACHABLE()
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity)
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
+namespace raw_log_internal {
 
 // Helper function to implement ABSL_RAW_LOG
 // Logs format... at "severity" level, reporting it
@@ -107,12 +128,9 @@
 void RawLog(absl::LogSeverity severity, const char* file, int line,
             const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
 
-// Writes the provided buffer directly to stderr, in a safe, low-level manner.
-//
-// In POSIX this means calling write(), which is async-signal safe and does
-// not malloc.  If the platform supports the SYS_write syscall, we invoke that
-// directly to side-step any libc interception.
-void SafeWriteToStderr(const char *s, size_t len);
+// Writes the provided buffer directly to stderr, in a signal-safe, low-level
+// manner.  Preserves errno.
+void AsyncSignalSafeWriteError(const char* s, size_t len);
 
 // compile-time function to get the "base" filename, that is, the part of
 // a filename after the last "/" or "\" path separator.  The search starts at
@@ -131,7 +149,7 @@
 // TODO(gfalcon): Come up with a better name for this method.
 bool RawLoggingFullySupported();
 
-// Function type for a raw_logging customization hook for suppressing messages
+// Function type for a raw_log customization hook for suppressing messages
 // by severity, and for writing custom prefixes on non-suppressed messages.
 //
 // The installed hook is called for every raw log invocation.  The message will
@@ -140,19 +158,20 @@
 // also provided with an output buffer, where it can write a custom log message
 // prefix.
 //
-// The raw_logging system does not allocate memory or grab locks.  User-provided
+// The raw_log system does not allocate memory or grab locks.  User-provided
 // hooks must avoid these operations, and must not throw exceptions.
 //
 // 'severity' is the severity level of the message being written.
 // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
 // was located.
-// 'buffer' and 'buf_size' are pointers to the buffer and buffer size.  If the
-// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// 'buf' and 'buf_size' are pointers to the buffer and buffer size.  If the
+// hook writes a prefix, it must increment *buf and decrement *buf_size
 // accordingly.
-using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
-                               int line, char** buffer, int* buf_size);
+using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity,
+                                        const char* file, int line, char** buf,
+                                        int* buf_size);
 
-// Function type for a raw_logging customization hook called to abort a process
+// Function type for a raw_log customization hook called to abort a process
 // when a FATAL message is logged.  If the provided AbortHook() returns, the
 // logging system will call abort().
 //
@@ -160,7 +179,10 @@
 // was located.
 // The NUL-terminated logged message lives in the buffer between 'buf_start'
 // and 'buf_end'.  'prefix_end' points to the first non-prefix character of the
-// buffer (as written by the LogPrefixHook.)
+// buffer (as written by the LogFilterAndPrefixHook.)
+//
+// The lifetime of the filename and message buffers will not end while the
+// process remains alive.
 using AbortHook = void (*)(const char* file, int line, const char* buf_start,
                            const char* prefix_end, const char* buf_end);
 
@@ -176,9 +198,17 @@
     InternalLogFunction>
     internal_log_function;
 
+// Registers hooks of the above types.  Only a single hook of each type may be
+// registered.  It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
+void RegisterAbortHook(AbortHook func);
 void RegisterInternalLogFunction(InternalLogFunction func);
 
-}  // namespace raw_logging_internal
+}  // namespace raw_log_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
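A hedged sketch of the renamed macros and the new hook-registration entry points (not part of the patch; raw_log_internal is an Abseil-internal namespace, and the snprintf-based prefix below is an illustrative choice that mirrors the formatting raw_logging itself performs):

    #include <cstdio>

    #include "absl/base/internal/raw_logging.h"
    #include "absl/base/log_severity.h"

    // A LogFilterAndPrefixHook that suppresses INFO and writes a small prefix.
    // Per the contract above, it must not allocate, lock, or throw.
    bool MyPrefixHook(absl::LogSeverity severity, const char* file, int line,
                      char** buf, int* buf_size) {
      if (severity == absl::LogSeverity::kInfo) return false;  // suppress
      int n = std::snprintf(*buf, static_cast<size_t>(*buf_size), "[%s:%d] ",
                            file, line);
      if (n > 0 && n < *buf_size) {
        *buf += n;       // advance past the prefix we wrote
        *buf_size -= n;  // and shrink the remaining space accordingly
      }
      return true;  // emit the message
    }

    int main() {
      absl::raw_log_internal::RegisterLogFilterAndPrefixHook(MyPrefixHook);
      ABSL_RAW_LOG(WARNING, "answer=%d", 42);         // custom prefix applied
      ABSL_RAW_DCHECK(2 + 2 == 4, "math is broken");  // debug builds only
      return 0;
    }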
 
diff --git a/abseil-cpp/absl/base/internal/spinlock.cc b/abseil-cpp/absl/base/internal/spinlock.cc
index a7d44f3..381b913 100644
--- a/abseil-cpp/absl/base/internal/spinlock.cc
+++ b/abseil-cpp/absl/base/internal/spinlock.cc
@@ -19,6 +19,7 @@
 #include <limits>
 
 #include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/atomic_hook.h"
 #include "absl/base/internal/cycleclock.h"
 #include "absl/base/internal/spinlock_wait.h"
@@ -66,12 +67,14 @@
   submit_profile_data.Store(fn);
 }
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 // Static member variable definitions.
 constexpr uint32_t SpinLock::kSpinLockHeld;
 constexpr uint32_t SpinLock::kSpinLockCooperative;
 constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
 constexpr uint32_t SpinLock::kSpinLockSleeper;
 constexpr uint32_t SpinLock::kWaitTimeMask;
+#endif
 
 // Uncommon constructors.
 SpinLock::SpinLock(base_internal::SchedulingMode mode)
@@ -125,8 +128,9 @@
     // it as having a sleeper.
     if ((lock_value & kWaitTimeMask) == 0) {
       // Here, just "mark" that the thread is going to sleep.  Don't store the
-      // lock wait time in the lock as that will cause the current lock
-      // owner to think it experienced contention.
+      // lock wait time in the lock -- the lock word stores the amount of time
+      // that the current holder waited before acquiring the lock, not the wait
+      // time of any thread currently waiting to acquire it.
       if (lockword_.compare_exchange_strong(
               lock_value, lock_value | kSpinLockSleeper,
               std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -140,6 +144,14 @@
         // this thread obtains the lock.
         lock_value = TryLockInternal(lock_value, wait_cycles);
         continue;   // Skip the delay at the end of the loop.
+      } else if ((lock_value & kWaitTimeMask) == 0) {
+        // The lock is still held, without a waiter being marked, but something
+        // else about the lock word changed, causing our CAS to fail. For
+        // example, a new lock holder may have acquired the lock with
+        // kSpinLockDisabledScheduling set, whereas the previous holder had not
+        // set that flag. In this case, attempt again to mark ourselves as a
+        // waiter.
+        continue;
       }
     }
 
@@ -166,7 +178,7 @@
   // reserve a unitary wait time to represent that a waiter exists without our
   // own acquisition having been contended.
   if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
-    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+    const int64_t wait_cycles = DecodeWaitCycles(lock_value);
     ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
     submit_profile_data(this, wait_cycles);
     ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
@@ -208,9 +220,9 @@
   return clamped;
 }
 
-uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
   // Cast to uint32_t first to ensure bits [63:32] are cleared.
-  const uint64_t scaled_wait_time =
+  const int64_t scaled_wait_time =
       static_cast<uint32_t>(lock_value & kWaitTimeMask);
   return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
 }
diff --git a/abseil-cpp/absl/base/internal/spinlock.h b/abseil-cpp/absl/base/internal/spinlock.h
index e6ac9e6..09ba582 100644
--- a/abseil-cpp/absl/base/internal/spinlock.h
+++ b/abseil-cpp/absl/base/internal/spinlock.h
@@ -15,25 +15,22 @@
 //
 
 //  Most users requiring mutual exclusion should use Mutex.
-//  SpinLock is provided for use in three situations:
-//   - for use in code that Mutex itself depends on
-//   - to get a faster fast-path release under low contention (without an
-//     atomic read-modify-write) In return, SpinLock has worse behaviour under
-//     contention, which is why Mutex is preferred in most situations.
+//  SpinLock is provided for use in two situations:
+//   - for use by Abseil internal code that Mutex itself depends on
 //   - for async signal safety (see below)
 
 // SpinLock is async signal safe.  If a spinlock is used within a signal
 // handler, all code that acquires the lock must ensure that the signal cannot
 // arrive while they are holding the lock.  Typically, this is done by blocking
 // the signal.
+//
+// Threads waiting on a SpinLock may be woken in an arbitrary order.
 
 #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
 #define ABSL_BASE_INTERNAL_SPINLOCK_H_
 
-#include <stdint.h>
-#include <sys/types.h>
-
 #include <atomic>
+#include <cstdint>
 
 #include "absl/base/attributes.h"
 #include "absl/base/const_init.h"
@@ -42,8 +39,6 @@
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/macros.h"
-#include "absl/base/port.h"
 #include "absl/base/thread_annotations.h"
 
 namespace absl {
@@ -121,6 +116,14 @@
     return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
   }
 
+  // Return immediately if this thread holds the SpinLock exclusively.
+  // Otherwise, report an error by crashing with a diagnostic.
+  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
+    if (!IsHeld()) {
+      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+    }
+  }
+
  protected:
   // These should not be exported except for testing.
 
@@ -130,7 +133,7 @@
                                    int64_t wait_end_time);
 
   // Extract number of wait cycles in a lock value.
-  static uint64_t DecodeWaitCycles(uint32_t lock_value);
+  static int64_t DecodeWaitCycles(uint32_t lock_value);
 
   // Provide access to protected method above.  Use for testing only.
   friend struct SpinLockTest;
@@ -140,8 +143,20 @@
   //
   // bit[0] encodes whether a lock is being held.
   // bit[1] encodes whether a lock uses cooperative scheduling.
-  // bit[2] encodes whether a lock disables scheduling.
+  // bit[2] encodes whether the current lock holder disabled scheduling when
+  //        acquiring the lock. Only set when kSpinLockHeld is also set.
   // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  //        This is set by the lock holder to indicate how long it waited on
+  //        the lock before eventually acquiring it. The number of cycles is
+  //        encoded as a 29-bit unsigned int, or in the case that the current
+  //        holder did not wait but another waiter is queued, the LSB
+  //        (kSpinLockSleeper) is set. The implementation does not explicitly
+  //        track the number of queued waiters beyond this. It must always be
+  //        assumed that waiters may exist if the current holder was required to
+  //        queue.
+  //
+  // Invariant: if the lock is not held, the value is either 0 or
+  // kSpinLockCooperative.
   static constexpr uint32_t kSpinLockHeld = 1;
   static constexpr uint32_t kSpinLockCooperative = 2;
   static constexpr uint32_t kSpinLockDisabledScheduling = 4;
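A minimal usage sketch to accompany the new AssertHeld() (not part of the patch; SpinLockHolder is assumed to be the RAII wrapper declared elsewhere in this header, and Mutex remains the preferred primitive outside Abseil internals):

    #include "absl/base/internal/spinlock.h"

    namespace {

    absl::base_internal::SpinLock counter_lock;
    int counter = 0;

    void IncrementLocked() {
      // Crashes with a diagnostic if the calling thread does not hold the lock.
      counter_lock.AssertHeld();
      ++counter;
    }

    void Increment() {
      absl::base_internal::SpinLockHolder l(&counter_lock);
      IncrementLocked();
    }

    }  // namespace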
diff --git a/abseil-cpp/absl/base/internal/spinlock_akaros.inc b/abseil-cpp/absl/base/internal/spinlock_akaros.inc
index bc46894..7b0cada 100644
--- a/abseil-cpp/absl/base/internal/spinlock_akaros.inc
+++ b/abseil-cpp/absl/base/internal/spinlock_akaros.inc
@@ -20,7 +20,7 @@
 
 extern "C" {
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
     int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
   // In Akaros, one must take care not to call anything that could cause a
@@ -29,7 +29,7 @@
   // arbitrary code.
 }
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
     std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
 
 }  // extern "C"
diff --git a/abseil-cpp/absl/base/internal/spinlock_linux.inc b/abseil-cpp/absl/base/internal/spinlock_linux.inc
index e31c6ed..fe8ba67 100644
--- a/abseil-cpp/absl/base/internal/spinlock_linux.inc
+++ b/abseil-cpp/absl/base/internal/spinlock_linux.inc
@@ -56,18 +56,15 @@
 
 extern "C" {
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
-    std::atomic<uint32_t> *w, uint32_t value, int loop,
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int,
     absl::base_internal::SchedulingMode) {
   absl::base_internal::ErrnoSaver errno_saver;
-  struct timespec tm;
-  tm.tv_sec = 0;
-  tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
-  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
 }
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
-                                                  bool all) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t> *w, bool all) {
   syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
 }
 
diff --git a/abseil-cpp/absl/base/internal/spinlock_posix.inc b/abseil-cpp/absl/base/internal/spinlock_posix.inc
index fcd21b1..4f6f887 100644
--- a/abseil-cpp/absl/base/internal/spinlock_posix.inc
+++ b/abseil-cpp/absl/base/internal/spinlock_posix.inc
@@ -25,7 +25,7 @@
 
 extern "C" {
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
     absl::base_internal::SchedulingMode /* mode */) {
   absl::base_internal::ErrnoSaver errno_saver;
@@ -40,7 +40,7 @@
   }
 }
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
     std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
 
 }  // extern "C"
diff --git a/abseil-cpp/absl/base/internal/spinlock_wait.h b/abseil-cpp/absl/base/internal/spinlock_wait.h
index 169bc74..9a1adcd 100644
--- a/abseil-cpp/absl/base/internal/spinlock_wait.h
+++ b/abseil-cpp/absl/base/internal/spinlock_wait.h
@@ -39,22 +39,22 @@
 // satisfying 0<=i<n && trans[i].done, atomically make the transition,
 // then return the old value of *w.   Make any other atomic transitions
 // where !trans[i].done, but continue waiting.
+//
+// Wakeups for threads blocked on SpinLockWait do not respect priorities.
 uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
                       const SpinLockWaitTransition trans[],
                       SchedulingMode scheduling_mode);
 
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If
-// "all" is true, wake all such threads.  This call is a hint, and on some
-// systems it may be a no-op; threads calling SpinLockDelay() will always wake
-// eventually even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
 void SpinLockWake(std::atomic<uint32_t> *w, bool all);
 
 // Wait for an appropriate spin delay on iteration "loop" of a
 // spin loop on location *w, whose previously observed value was "value".
 // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
-// In all cases, it must return in bounded time even if SpinLockWake() is not
-// called.
+// or may wait for a call to SpinLockWake(w).
 void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
                    base_internal::SchedulingMode scheduling_mode);
 
@@ -73,21 +73,23 @@
 // By changing our extension points to be extern "C", we dodge this
 // check.
 extern "C" {
-void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
-void AbslInternalSpinLockDelay(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+                                                      bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode scheduling_mode);
 }
 
 inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
                                               bool all) {
-  AbslInternalSpinLockWake(w, all);
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
 }
 
 inline void absl::base_internal::SpinLockDelay(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode scheduling_mode) {
-  AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+  (w, value, loop, scheduling_mode);
 }
 
 #endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/abseil-cpp/absl/base/internal/spinlock_win32.inc b/abseil-cpp/absl/base/internal/spinlock_win32.inc
index 78654b5..934c201 100644
--- a/abseil-cpp/absl/base/internal/spinlock_win32.inc
+++ b/abseil-cpp/absl/base/internal/spinlock_win32.inc
@@ -20,18 +20,21 @@
 
 extern "C" {
 
-void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
-                               uint32_t /* value */, int loop,
-                               absl::base_internal::SchedulingMode /* mode */) {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
   if (loop == 0) {
   } else if (loop == 1) {
     Sleep(0);
   } else {
-    Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+    // SpinLockSuggestedDelayNS() always returns a positive integer, so this
+    // static_cast is safe.
+    Sleep(static_cast<DWORD>(
+        absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000));
   }
 }
 
-void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
-                              bool /* all */) {}
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
 
 }  // extern "C"
diff --git a/abseil-cpp/absl/base/internal/strerror.cc b/abseil-cpp/absl/base/internal/strerror.cc
index d66ba12..de91c05 100644
--- a/abseil-cpp/absl/base/internal/strerror.cc
+++ b/abseil-cpp/absl/base/internal/strerror.cc
@@ -51,7 +51,6 @@
 }
 
 std::string StrErrorInternal(int errnum) {
-  absl::base_internal::ErrnoSaver errno_saver;
   char buf[100];
   const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
   if (*str == '\0') {
@@ -67,8 +66,8 @@
 
 std::array<std::string, kSysNerr>* NewStrErrorTable() {
   auto* table = new std::array<std::string, kSysNerr>;
-  for (int i = 0; i < static_cast<int>(table->size()); ++i) {
-    (*table)[i] = StrErrorInternal(i);
+  for (size_t i = 0; i < table->size(); ++i) {
+    (*table)[i] = StrErrorInternal(static_cast<int>(i));
   }
   return table;
 }
@@ -76,9 +75,10 @@
 }  // namespace
 
 std::string StrError(int errnum) {
+  absl::base_internal::ErrnoSaver errno_saver;
   static const auto* table = NewStrErrorTable();
-  if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
-    return (*table)[errnum];
+  if (errnum >= 0 && static_cast<size_t>(errnum) < table->size()) {
+    return (*table)[static_cast<size_t>(errnum)];
   }
   return StrErrorInternal(errnum);
 }
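
Moving the ErrnoSaver from StrErrorInternal() up into StrError() means one guard now covers both the lazy table construction and the lookup, so the caller's errno is left untouched. A rough analogue of that guard (ScopedErrnoSaver is an illustrative name; the real class is absl::base_internal::ErrnoSaver):

#include <cerrno>

// Captures errno on entry and restores it on scope exit, so any work done in
// between cannot leak a changed errno back to the caller.
class ScopedErrnoSaver {
 public:
  ScopedErrnoSaver() : saved_(errno) {}
  ~ScopedErrnoSaver() { errno = saved_; }

 private:
  int saved_;
};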
diff --git a/abseil-cpp/absl/base/internal/strerror_test.cc b/abseil-cpp/absl/base/internal/strerror_test.cc
index a53da97..e32d5b5 100644
--- a/abseil-cpp/absl/base/internal/strerror_test.cc
+++ b/abseil-cpp/absl/base/internal/strerror_test.cc
@@ -62,12 +62,14 @@
       ++counter;
       errno = ERANGE;
       const std::string value = absl::base_internal::StrError(i);
+      // EXPECT_* could change errno. Stash it first.
+      int check_err = errno;
+      EXPECT_THAT(check_err, Eq(ERANGE));
       // Only the GNU implementation is guaranteed to provide the
       // string "Unknown error nnn". POSIX doesn't say anything.
       if (!absl::StartsWith(value, "Unknown error ")) {
-        EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i]));
+        EXPECT_THAT(value, Eq(expected_strings[i]));
       }
-      EXPECT_THAT(errno, Eq(ERANGE));
     }
   };
 
diff --git a/abseil-cpp/absl/base/internal/sysinfo.cc b/abseil-cpp/absl/base/internal/sysinfo.cc
index 349d926..8bcc4fa 100644
--- a/abseil-cpp/absl/base/internal/sysinfo.cc
+++ b/abseil-cpp/absl/base/internal/sysinfo.cc
@@ -41,6 +41,7 @@
 #include <string.h>
 
 #include <cassert>
+#include <cerrno>
 #include <cstdint>
 #include <cstdio>
 #include <cstdlib>
@@ -61,14 +62,82 @@
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
+namespace {
+
+#if defined(_WIN32)
+
+// Returns the number of bits set in `bitMask`.
+DWORD Win32CountSetBits(ULONG_PTR bitMask) {
+  for (DWORD bitSetCount = 0; ; ++bitSetCount) {
+    if (bitMask == 0) return bitSetCount;
+    bitMask &= bitMask - 1;
+  }
+}
+
+// Returns the number of logical CPUs using GetLogicalProcessorInformation(), or
+// 0 if the number of processors is not available or cannot be computed.
+// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+int Win32NumCPUs() {
+#pragma comment(lib, "kernel32.lib")
+  using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+
+  DWORD info_size = sizeof(Info);
+  Info* info(static_cast<Info*>(malloc(info_size)));
+  if (info == nullptr) return 0;
+
+  bool success = GetLogicalProcessorInformation(info, &info_size);
+  if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+    free(info);
+    info = static_cast<Info*>(malloc(info_size));
+    if (info == nullptr) return 0;
+    success = GetLogicalProcessorInformation(info, &info_size);
+  }
+
+  DWORD logicalProcessorCount = 0;
+  if (success) {
+    Info* ptr = info;
+    DWORD byteOffset = 0;
+    while (byteOffset + sizeof(Info) <= info_size) {
+      switch (ptr->Relationship) {
+        case RelationProcessorCore:
+          logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
+          break;
+
+        case RelationNumaNode:
+        case RelationCache:
+        case RelationProcessorPackage:
+          // Ignore other entries
+          break;
+
+        default:
+          // Ignore unknown entries
+          break;
+      }
+      byteOffset += sizeof(Info);
+      ptr++;
+    }
+  }
+  free(info);
+  return static_cast<int>(logicalProcessorCount);
+}
+
+#endif
+
+}  // namespace
+
 static int GetNumCPUs() {
 #if defined(__myriad2__)
   return 1;
+#elif defined(_WIN32)
+  const int hardware_concurrency = Win32NumCPUs();
+  return hardware_concurrency ? hardware_concurrency : 1;
+#elif defined(_AIX)
+  return sysconf(_SC_NPROCESSORS_ONLN);
 #else
   // Other possibilities:
   //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
   //  - sysconf(_SC_NPROCESSORS_ONLN)
-  return std::thread::hardware_concurrency();
+  return static_cast<int>(std::thread::hardware_concurrency());
 #endif
 }
 
@@ -91,7 +160,7 @@
     DWORD type = 0;
     DWORD data = 0;
     DWORD data_size = sizeof(data);
-    auto result = RegQueryValueExA(key, "~MHz", 0, &type,
+    auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
                                    reinterpret_cast<LPBYTE>(&data), &data_size);
     RegCloseKey(key);
     if (result == ERROR_SUCCESS && type == REG_DWORD &&
@@ -121,12 +190,21 @@
 // and the memory location pointed to by value is set to the value read.
 static bool ReadLongFromFile(const char *file, long *value) {
   bool ret = false;
-  int fd = open(file, O_RDONLY);
+#if defined(_POSIX_C_SOURCE)
+  const int file_mode = (O_RDONLY | O_CLOEXEC);
+#else
+  const int file_mode = O_RDONLY;
+#endif
+
+  int fd = open(file, file_mode);
   if (fd != -1) {
     char line[1024];
     char *err;
     memset(line, '\0', sizeof(line));
-    int len = read(fd, line, sizeof(line) - 1);
+    ssize_t len;
+    do {
+      len = read(fd, line, sizeof(line) - 1);
+    } while (len < 0 && errno == EINTR);
     if (len <= 0) {
       ret = false;
     } else {
@@ -154,8 +232,8 @@
   int rc = clock_gettime(CLOCK_MONOTONIC, &t);
 #endif
   if (rc != 0) {
-    perror("clock_gettime() failed");
-    abort();
+    ABSL_INTERNAL_LOG(
+        FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
   }
   return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
 }
@@ -308,7 +386,7 @@
 #endif
 
 pid_t GetTID() {
-  return syscall(SYS_gettid);
+  return static_cast<pid_t>(syscall(SYS_gettid));
 }
 
 #elif defined(__akaros__)
@@ -343,81 +421,33 @@
   return tid;
 }
 
+#elif defined(__APPLE__)
+
+pid_t GetTID() {
+  uint64_t tid;
+  // `nullptr` here implies this thread.  This only fails if the specified
+  // thread is invalid or the pointer-to-tid is null, so we needn't worry about
+  // it.
+  pthread_threadid_np(nullptr, &tid);
+  return static_cast<pid_t>(tid);
+}
+
+#elif defined(__native_client__)
+
+pid_t GetTID() {
+  auto* thread = pthread_self();
+  static_assert(sizeof(pid_t) == sizeof(thread),
+                "In NaCl, int is expected to be the same size as a pointer");
+  return reinterpret_cast<pid_t>(thread);
+}
+
 #else
 
-// Fallback implementation of GetTID using pthread_getspecific.
-ABSL_CONST_INIT static once_flag tid_once;
-ABSL_CONST_INIT static pthread_key_t tid_key;
-ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// We set a bit per thread in this array to indicate that an ID is in
-// use. ID 0 is unused because it is the default value returned by
-// pthread_getspecific().
-ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
-    ABSL_GUARDED_BY(tid_lock) = nullptr;
-static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
-
-// Returns the TID to tid_array.
-static void FreeTID(void *v) {
-  intptr_t tid = reinterpret_cast<intptr_t>(v);
-  int word = tid / kBitsPerWord;
-  uint32_t mask = ~(1u << (tid % kBitsPerWord));
-  absl::base_internal::SpinLockHolder lock(&tid_lock);
-  assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
-  (*tid_array)[word] &= mask;
-}
-
-static void InitGetTID() {
-  if (pthread_key_create(&tid_key, FreeTID) != 0) {
-    // The logging system calls GetTID() so it can't be used here.
-    perror("pthread_key_create failed");
-    abort();
-  }
-
-  // Initialize tid_array.
-  absl::base_internal::SpinLockHolder lock(&tid_lock);
-  tid_array = new std::vector<uint32_t>(1);
-  (*tid_array)[0] = 1;  // ID 0 is never-allocated.
-}
-
-// Return a per-thread small integer ID from pthread's thread-specific data.
+// Fallback implementation of `GetTID` using `pthread_self`.
 pid_t GetTID() {
-  absl::call_once(tid_once, InitGetTID);
-
-  intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
-  if (tid != 0) {
-    return tid;
-  }
-
-  int bit;  // tid_array[word] = 1u << bit;
-  size_t word;
-  {
-    // Search for the first unused ID.
-    absl::base_internal::SpinLockHolder lock(&tid_lock);
-    // First search for a word in the array that is not all ones.
-    word = 0;
-    while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
-      ++word;
-    }
-    if (word == tid_array->size()) {
-      tid_array->push_back(0);  // No space left, add kBitsPerWord more IDs.
-    }
-    // Search for a zero bit in the word.
-    bit = 0;
-    while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
-      ++bit;
-    }
-    tid = (word * kBitsPerWord) + bit;
-    (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
-  }
-
-  if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
-    perror("pthread_setspecific failed");
-    abort();
-  }
-
-  return static_cast<pid_t>(tid);
+  // `pthread_t` need not be arithmetic per POSIX; platforms where it isn't
+  // should be handled above.
+  return static_cast<pid_t>(pthread_self());
 }
 
 #endif
@@ -426,7 +456,7 @@
 // userspace construct) to avoid unnecessary system calls. Without this caching,
 // it can take roughly 98ns, while it takes roughly 1ns with this caching.
 pid_t GetCachedTID() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local pid_t thread_id = GetTID();
   return thread_id;
 #else
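
The updated ReadLongFromFile() above now retries read() when the call is interrupted by a signal before returning any data. The same idiom in isolation (ReadRetryingOnEintr is an illustrative name):

#include <unistd.h>

#include <cerrno>
#include <cstddef>

ssize_t ReadRetryingOnEintr(int fd, void* buf, size_t count) {
  ssize_t n;
  do {
    n = read(fd, buf, count);
  } while (n < 0 && errno == EINTR);  // interrupted before any bytes arrived
  return n;
}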
diff --git a/abseil-cpp/absl/base/internal/sysinfo_test.cc b/abseil-cpp/absl/base/internal/sysinfo_test.cc
index fa8b88b..f305b6c 100644
--- a/abseil-cpp/absl/base/internal/sysinfo_test.cc
+++ b/abseil-cpp/absl/base/internal/sysinfo_test.cc
@@ -37,18 +37,6 @@
       << "NumCPUs() should not have the default value of 0";
 }
 
-TEST(SysinfoTest, NominalCPUFrequency) {
-#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
-  EXPECT_GE(NominalCPUFrequency(), 1000.0)
-      << "NominalCPUFrequency() did not return a reasonable value";
-#else
-  // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
-  // Emscripten does not have a sysfs to read from at all.
-  EXPECT_EQ(NominalCPUFrequency(), 1.0)
-      << "CPU frequency detection was fixed! Please update unittest.";
-#endif
-}
-
 TEST(SysinfoTest, GetTID) {
   EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
 #ifdef __native_client__
diff --git a/abseil-cpp/absl/base/internal/thread_annotations.h b/abseil-cpp/absl/base/internal/thread_annotations.h
index 4dab6a9..8c5c67e 100644
--- a/abseil-cpp/absl/base/internal/thread_annotations.h
+++ b/abseil-cpp/absl/base/internal/thread_annotations.h
@@ -38,6 +38,13 @@
 #ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
 #define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
 
+// ABSL_LEGACY_THREAD_ANNOTATIONS is a *temporary* compatibility macro that can
+// be defined on the compile command-line to restore the legacy spellings of the
+// thread annotations macros/functions. The macros in this file are available
+// under ABSL_ prefixed spellings in absl/base/thread_annotations.h. This macro
+// and the legacy spellings will be removed in the future.
+#ifdef ABSL_LEGACY_THREAD_ANNOTATIONS
+
 #if defined(__clang__)
 #define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
 #else
@@ -268,4 +275,6 @@
 
 }  // namespace thread_safety_analysis
 
+#endif  // defined(ABSL_LEGACY_THREAD_ANNOTATIONS)
+
 #endif  // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
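
With the legacy spellings now gated behind ABSL_LEGACY_THREAD_ANNOTATIONS, new code is expected to use the ABSL_-prefixed macros from absl/base/thread_annotations.h. A minimal usage sketch (the Counter class is illustrative and assumes absl::Mutex is available):

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  void Increment() {
    absl::MutexLock lock(&mu_);  // satisfies the ABSL_GUARDED_BY requirement
    ++value_;
  }

 private:
  absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};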
diff --git a/abseil-cpp/absl/base/internal/thread_identity.cc b/abseil-cpp/absl/base/internal/thread_identity.cc
index d63a04a..252443e 100644
--- a/abseil-cpp/absl/base/internal/thread_identity.cc
+++ b/abseil-cpp/absl/base/internal/thread_identity.cc
@@ -14,7 +14,7 @@
 
 #include "absl/base/internal/thread_identity.h"
 
-#ifndef _WIN32
+#if !defined(_WIN32) || defined(__MINGW32__)
 #include <pthread.h>
 #include <signal.h>
 #endif
@@ -23,6 +23,7 @@
 #include <cassert>
 #include <memory>
 
+#include "absl/base/attributes.h"
 #include "absl/base/call_once.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
@@ -53,19 +54,23 @@
 // exist within a process (via dlopen() or similar), references to
 // thread_identity_ptr from each instance of the code will refer to
 // *different* instances of this ptr.
-#ifdef __GNUC__
-__attribute__((visibility("protected")))
-#endif  // __GNUC__
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+ABSL_CONST_INIT  // Must come before __attribute__((visibility("protected")))
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+    __attribute__((visibility("protected")))
+#endif  // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
 #if ABSL_PER_THREAD_TLS
-// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
-ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+    // Prefer __thread to thread_local as benchmarks indicate it is a bit
+    // faster.
+    ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
 #elif defined(ABSL_HAVE_THREAD_LOCAL)
-thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+    thread_local ThreadIdentity* thread_identity_ptr = nullptr;
 #endif  // ABSL_PER_THREAD_TLS
 #endif  // TLS or CPP11
 
-void SetCurrentThreadIdentity(
-    ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer) {
   assert(CurrentThreadIdentityIfPresent() == nullptr);
   // Associate our destructor.
   // NOTE: This call to pthread_setspecific is currently the only immovable
@@ -75,7 +80,7 @@
   absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
                   reclaimer);
 
-#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
+#if defined(__EMSCRIPTEN__) || defined(__MINGW32__) || defined(__hexagon__)
   // Emscripten and MinGW pthread implementations do not support signals.
   // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
   // for more information.
@@ -117,10 +122,10 @@
     ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
 
 // Please see the comment on `CurrentThreadIdentityIfPresent` in
-// thread_identity.h. Because DLLs cannot expose thread_local variables in
-// headers, we opt for the correct-but-slower option of placing the definition
-// of this function only in a translation unit inside DLL.
-#if defined(ABSL_BUILD_DLL) || defined(ABSL_CONSUME_DLL)
+// thread_identity.h. When we cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of not inlining this
+// function.
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
 ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
 #endif
 #endif
@@ -130,7 +135,7 @@
     ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
   thread_identity_ptr = nullptr;
 #elif ABSL_THREAD_IDENTITY_MODE == \
-      ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
   // pthread_setspecific expected to clear value on destruction
   assert(CurrentThreadIdentityIfPresent() == nullptr);
 #endif
diff --git a/abseil-cpp/absl/base/internal/thread_identity.h b/abseil-cpp/absl/base/internal/thread_identity.h
index ceb109b..b6e917c 100644
--- a/abseil-cpp/absl/base/internal/thread_identity.h
+++ b/abseil-cpp/absl/base/internal/thread_identity.h
@@ -32,6 +32,7 @@
 
 #include "absl/base/config.h"
 #include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -61,38 +62,36 @@
     return reinterpret_cast<ThreadIdentity*>(this);
   }
 
-  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
-  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
                          // up to and including "skip" have same
                          // condition as this, and will be woken later
   bool may_skip;         // if false while on mutex queue, a mutex unlocker
                          // is using this PerThreadSynch as a terminator.  Its
                          // skip field must not be filled in because the loop
                          // might then skip over the terminator.
-
-  // The wait parameters of the current wait.  waitp is null if the
-  // thread is not waiting. Transitions from null to non-null must
-  // occur before the enqueue commit point (state = kQueued in
-  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
-  // null must occur after the wait is finished (state = kAvailable in
-  // Mutex::Block() and CondVar::WaitCommon()). This field may be
-  // changed only by the thread that describes this PerThreadSynch.  A
-  // special case is Fer(), which calls Enqueue() on another thread,
-  // but with an identical SynchWaitParams pointer, thus leaving the
-  // pointer unchanged.
-  SynchWaitParams *waitp;
-
-  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
-                               // broken invariants.  This is used within fatal
-                               // signal handlers to improve the chances of
-                               // debug logging information being output
-                               // successfully.
-
-  intptr_t readers;     // Number of readers in mutex.
-  int priority;         // Priority of thread (updated every so often).
-
-  // When priority will next be read (cycles).
-  int64_t next_priority_read_cycles;
+  bool wake;             // This thread is to be woken from a Mutex.
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake.  Used for an optimization
+                         // in Enqueue().  true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully
+                         // releases the lock.  It may not be set to false
+                         // by a reader that decrements the count to
+                         // non-zero. protected by mutex spinlock
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face
+                               // of broken invariants.  This is used within
+                               // fatal signal handlers to improve the
+                               // chances of debug logging information being
+                               // output successfully.
+  int priority;                // Priority of thread (updated every so often).
 
   // State values:
   //   kAvailable: This PerThreadSynch is available.
@@ -105,36 +104,37 @@
   //
   // Transitions from kAvailable to kQueued require no barrier, they
   // are externally ordered by the Mutex.
-  enum State {
-    kAvailable,
-    kQueued
-  };
+  enum State { kAvailable, kQueued };
   std::atomic<State> state;
 
-  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
-                         // true if UnlockSlow could be searching
-                         // for a waiter to wake.  Used for an optimization
-                         // in Enqueue().  true is always a valid value.
-                         // Can be reset to false when the unlocker or any
-                         // writer releases the lock, or a reader fully releases
-                         // the lock.  It may not be set to false by a reader
-                         // that decrements the count to non-zero.
-                         // protected by mutex spinlock
+  // The wait parameters of the current wait.  waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch.  A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams* waitp;
 
-  bool wake;  // This thread is to be woken from a Mutex.
+  intptr_t readers;  // Number of readers in mutex.
 
-  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
-  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
-  //
-  // The value of "x->cond_waiter" is meaningless if "x" is not on a
-  // Mutex waiter list.
-  bool cond_waiter;
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
 
   // Locks held; used during deadlock detection.
   // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
-  SynchLocksHeld *all_locks;
+  SynchLocksHeld* all_locks;
 };
 
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
+//
+// NOTE: The layout of fields in this structure is critical; please do not
+//       add, remove, or modify the field placements without fully auditing the
+//       layout.
 struct ThreadIdentity {
   // Must be the first member.  The Mutex implementation requires that
   // the PerThreadSynch object associated with each thread is
@@ -144,7 +144,7 @@
 
   // Private: Reserved for absl::synchronization_internal::Waiter.
   struct WaiterState {
-    char data[128];
+    alignas(void*) char data[256];
   } waiter_state;
 
   // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -167,7 +167,10 @@
 //
 // Does not malloc(*), and is async-signal safe.
 // [*] Technically pthread_setspecific() does malloc on first use; however this
-// is handled internally within tcmalloc's initialization already.
+// is handled internally within tcmalloc's initialization already. Note that
+// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
+// on Apple platforms. Whatever function is calling your MallocHooks will need
+// to watch for recursion on Apple platforms.
 //
 // New ThreadIdentity objects can be constructed and associated with a thread
 // by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
@@ -189,29 +192,31 @@
 // May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
 // index>
 #ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
-#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be direcly set
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
 #else
 #define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
 #endif
 
 #ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
-#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be direcly set
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
 #else
 #define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
 #endif
 
 #ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be direcly set
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
 #else
 #define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
 #endif
 
 #ifdef ABSL_THREAD_IDENTITY_MODE
-#error ABSL_THREAD_IDENTITY_MODE cannot be direcly set
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
 #elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
 #define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
 #elif defined(_WIN32) && !defined(__MINGW32__)
 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
 #elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
     (__GOOGLE_GRTE_VERSION__ >= 20140228L)
 // Support for async-safe TLS was specifically added in GRTEv4.  It's not
@@ -235,13 +240,18 @@
 #error Thread-local storage not detected on this platform
 #endif
 
-// thread_local variables cannot be in headers exposed by DLLs. However, it is
-// important for performance reasons in general that
-// `CurrentThreadIdentityIfPresent` be inlined. This is not possible across a
-// DLL boundary so, with DLLs, we opt to have the function not be inlined. Note
+// thread_local variables cannot be in headers exposed by DLLs or in certain
+// build configurations on Apple platforms. However, it is important for
+// performance reasons in general that `CurrentThreadIdentityIfPresent` be
+// inlined. In the other cases we opt to have the function not be inlined. Note
 // that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
-// this entire inline definition when compiling as a DLL.
-#if !defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL)
+// this entire inline definition.
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
+    !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
+#endif
+
+#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
 inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
   return thread_identity_ptr;
 }
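
Earlier in this file, WaiterState grows from 128 to 256 pointer-aligned bytes of opaque storage reserved for absl::synchronization_internal::Waiter. A generic sketch of the reserve-raw-bytes-and-placement-new idiom such a field typically supports (Storage and Impl are illustrative stand-ins, not Abseil types):

#include <new>

struct Storage {
  alignas(void*) unsigned char data[256];
};

struct Impl {  // stand-in for the real implementation type
  int fd = -1;
};

Impl* ConstructInto(Storage* s) {
  static_assert(sizeof(Impl) <= sizeof(Storage::data), "storage too small");
  static_assert(alignof(Impl) <= alignof(void*), "storage under-aligned");
  return ::new (static_cast<void*>(s->data)) Impl();
}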
diff --git a/abseil-cpp/absl/base/internal/thread_identity_test.cc b/abseil-cpp/absl/base/internal/thread_identity_test.cc
index 46a6f74..5f17553 100644
--- a/abseil-cpp/absl/base/internal/thread_identity_test.cc
+++ b/abseil-cpp/absl/base/internal/thread_identity_test.cc
@@ -95,7 +95,7 @@
 }
 
 TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
-  // This test repeatly creates and joins a series of threads, each of
+  // This test repeatedly creates and joins a series of threads, each of
   // which acquires and releases shared Mutex locks. This verifies
   // Mutex operations work correctly under a reused
   // ThreadIdentity. Note that the most likely failure mode of this
diff --git a/abseil-cpp/absl/base/internal/throw_delegate.cc b/abseil-cpp/absl/base/internal/throw_delegate.cc
index c055f75..337e870 100644
--- a/abseil-cpp/absl/base/internal/throw_delegate.cc
+++ b/abseil-cpp/absl/base/internal/throw_delegate.cc
@@ -18,6 +18,7 @@
 #include <functional>
 #include <new>
 #include <stdexcept>
+
 #include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 
@@ -25,83 +26,177 @@
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
-namespace {
-template <typename T>
-[[noreturn]] void Throw(const T& error) {
+// NOTE: The exception types, like `std::logic_error`, do not exist on all
+// platforms. (For example, the Android NDK does not have them.)
+// Therefore, their use must be guarded by `#ifdef` or equivalent.
+
+void ThrowStdLogicError(const std::string& what_arg) {
 #ifdef ABSL_HAVE_EXCEPTIONS
-  throw error;
+  throw std::logic_error(what_arg);
 #else
-  ABSL_RAW_LOG(FATAL, "%s", error.what());
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
   std::abort();
 #endif
 }
-}  // namespace
-
-void ThrowStdLogicError(const std::string& what_arg) {
-  Throw(std::logic_error(what_arg));
-}
 void ThrowStdLogicError(const char* what_arg) {
-  Throw(std::logic_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::logic_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdInvalidArgument(const std::string& what_arg) {
-  Throw(std::invalid_argument(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::invalid_argument(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdInvalidArgument(const char* what_arg) {
-  Throw(std::invalid_argument(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::invalid_argument(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdDomainError(const std::string& what_arg) {
-  Throw(std::domain_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::domain_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdDomainError(const char* what_arg) {
-  Throw(std::domain_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::domain_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdLengthError(const std::string& what_arg) {
-  Throw(std::length_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::length_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdLengthError(const char* what_arg) {
-  Throw(std::length_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::length_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdOutOfRange(const std::string& what_arg) {
-  Throw(std::out_of_range(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::out_of_range(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdOutOfRange(const char* what_arg) {
-  Throw(std::out_of_range(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::out_of_range(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdRuntimeError(const std::string& what_arg) {
-  Throw(std::runtime_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::runtime_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdRuntimeError(const char* what_arg) {
-  Throw(std::runtime_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::runtime_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdRangeError(const std::string& what_arg) {
-  Throw(std::range_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::range_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdRangeError(const char* what_arg) {
-  Throw(std::range_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::range_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdOverflowError(const std::string& what_arg) {
-  Throw(std::overflow_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::overflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdOverflowError(const char* what_arg) {
-  Throw(std::overflow_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::overflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
 void ThrowStdUnderflowError(const std::string& what_arg) {
-  Throw(std::underflow_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::underflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdUnderflowError(const char* what_arg) {
-  Throw(std::underflow_error(what_arg));
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::underflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 
-void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); }
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::bad_function_call();
+#else
+  std::abort();
+#endif
+}
 
-void ThrowStdBadAlloc() { Throw(std::bad_alloc()); }
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::bad_alloc();
+#else
+  std::abort();
+#endif
+}
 
 }  // namespace base_internal
 ABSL_NAMESPACE_END
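
Each delegate above follows the same shape: throw when exceptions are available, otherwise log the message and abort. A standalone sketch of that shape, keyed off the compiler's __cpp_exceptions feature macro rather than ABSL_HAVE_EXCEPTIONS (FailWithLogicError is an illustrative name):

#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <string>

[[noreturn]] void FailWithLogicError(const std::string& what_arg) {
#ifdef __cpp_exceptions
  throw std::logic_error(what_arg);
#else
  std::fprintf(stderr, "%s\n", what_arg.c_str());
  std::abort();
#endif
}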
diff --git a/abseil-cpp/absl/base/internal/unaligned_access.h b/abseil-cpp/absl/base/internal/unaligned_access.h
index dd5250d..093dd9b 100644
--- a/abseil-cpp/absl/base/internal/unaligned_access.h
+++ b/abseil-cpp/absl/base/internal/unaligned_access.h
@@ -31,80 +31,6 @@
 // The unaligned API is C++ only.  The declarations use C++ features
 // (namespaces, inline) which are absent or incompatible in C.
 #if defined(__cplusplus)
-
-#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
-    defined(ABSL_HAVE_THREAD_SANITIZER) || defined(ABSL_HAVE_MEMORY_SANITIZER)
-// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
-// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
-// will miss a bug if 08 is the first unaddressable byte.
-// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
-// miss a race between this access and some other accesses to 08.
-// MemorySanitizer will correctly propagate the shadow on unaligned stores
-// and correctly report bugs on unaligned loads, but it may not properly
-// update and report the origin of the uninitialized memory.
-// For all three tools, replacing an unaligned access with a tool-specific
-// callback solves the problem.
-
-// Make sure uint16_t/uint32_t/uint64_t are defined.
-#include <stdint.h>
-
-extern "C" {
-uint16_t __sanitizer_unaligned_load16(const void *p);
-uint32_t __sanitizer_unaligned_load32(const void *p);
-uint64_t __sanitizer_unaligned_load64(const void *p);
-void __sanitizer_unaligned_store16(void *p, uint16_t v);
-void __sanitizer_unaligned_store32(void *p, uint32_t v);
-void __sanitizer_unaligned_store64(void *p, uint64_t v);
-}  // extern "C"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-inline uint16_t UnalignedLoad16(const void *p) {
-  return __sanitizer_unaligned_load16(p);
-}
-
-inline uint32_t UnalignedLoad32(const void *p) {
-  return __sanitizer_unaligned_load32(p);
-}
-
-inline uint64_t UnalignedLoad64(const void *p) {
-  return __sanitizer_unaligned_load64(p);
-}
-
-inline void UnalignedStore16(void *p, uint16_t v) {
-  __sanitizer_unaligned_store16(p, v);
-}
-
-inline void UnalignedStore32(void *p, uint32_t v) {
-  __sanitizer_unaligned_store32(p, v);
-}
-
-inline void UnalignedStore64(void *p, uint64_t v) {
-  __sanitizer_unaligned_store64(p, v);
-}
-
-}  // namespace base_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
-  (absl::base_internal::UnalignedLoad16(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
-  (absl::base_internal::UnalignedLoad32(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
-  (absl::base_internal::UnalignedLoad64(_p))
-
-#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
-  (absl::base_internal::UnalignedStore16(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
-  (absl::base_internal::UnalignedStore32(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
-  (absl::base_internal::UnalignedStore64(_p, _val))
-
-#else
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
@@ -151,8 +77,6 @@
 #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
   (absl::base_internal::UnalignedStore64(_p, _val))
 
-#endif
-
 #endif  // defined(__cplusplus), end of unaligned API
 
 #endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
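
With the sanitizer-specific callbacks removed, every build now takes the generic path kept below this hunk, whose usual portable form is a memcpy into a correctly typed local. That idiom in isolation (LoadUnaligned32 is an illustrative name, not the Abseil function):

#include <cstdint>
#include <cstring>

inline uint32_t LoadUnaligned32(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof v);  // well defined for any address; lowers to one load
  return v;
}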
diff --git a/abseil-cpp/absl/base/internal/unscaledcycleclock.cc b/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
index f1e7bbe..05e0e7b 100644
--- a/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
+++ b/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
@@ -24,8 +24,13 @@
 #ifdef __GLIBC__
 #include <sys/platform/ppc.h>
 #elif defined(__FreeBSD__)
-#include <sys/sysctl.h>
+// clang-format off
+// This order does actually matter =(.
 #include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+
+#include "absl/base/call_once.h"
 #endif
 #endif
 
@@ -49,12 +54,6 @@
 
 #elif defined(__x86_64__)
 
-int64_t UnscaledCycleClock::Now() {
-  uint64_t low, high;
-  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
-  return (high << 32) | low;
-}
-
 double UnscaledCycleClock::Frequency() {
   return base_internal::NominalCPUFrequency();
 }
@@ -72,13 +71,12 @@
 #else
   int32_t tbu, tbl, tmp;
   asm volatile(
-      "0:\n"
       "mftbu %[hi32]\n"
       "mftb %[lo32]\n"
       "mftbu %[tmp]\n"
       "cmpw %[tmp],%[hi32]\n"
-      "bne 0b\n"
-      : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp));
+      "bne $-16\n"  // Retry on failure.
+      : [hi32] "=r"(tbu), [lo32] "=r"(tbl), [tmp] "=r"(tmp));
   return (static_cast<int64_t>(tbu) << 32) | tbl;
 #endif
 #endif
@@ -87,6 +85,10 @@
 double UnscaledCycleClock::Frequency() {
 #ifdef __GLIBC__
   return __ppc_get_timebase_freq();
+#elif defined(_AIX)
+  // This is the same constant value as returned by
+  // __ppc_get_timebase_freq().
+  return static_cast<double>(512000000);
 #elif defined(__FreeBSD__)
   static once_flag init_timebase_frequency_once;
   static double timebase_frequency = 0.0;
@@ -119,13 +121,23 @@
   return aarch64_timer_frequency;
 }
 
+#elif defined(__riscv)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("rdcycle %0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
 #elif defined(_M_IX86) || defined(_M_X64)
 
 #pragma intrinsic(__rdtsc)
 
-int64_t UnscaledCycleClock::Now() {
-  return __rdtsc();
-}
+int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
 
 double UnscaledCycleClock::Frequency() {
   return base_internal::NominalCPUFrequency();
diff --git a/abseil-cpp/absl/base/internal/unscaledcycleclock.h b/abseil-cpp/absl/base/internal/unscaledcycleclock.h
index 82f2c87..cc1276b 100644
--- a/abseil-cpp/absl/base/internal/unscaledcycleclock.h
+++ b/abseil-cpp/absl/base/internal/unscaledcycleclock.h
@@ -42,49 +42,11 @@
 #include <TargetConditionals.h>
 #endif
 
-#include "absl/base/port.h"
-
-// The following platforms have an implementation of a hardware counter.
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
-  defined(__powerpc__) || defined(__ppc__) || \
-  defined(_M_IX86) || defined(_M_X64)
-#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
-#else
-#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
-#endif
-
-// The following platforms often disable access to the hardware
-// counter (through a sandbox) even if the underlying hardware has a
-// usable counter. The CycleTimer interface also requires a *scaled*
-// CycleClock that runs at atleast 1 MHz. We've found some Android
-// ARM64 devices where this is not the case, so we disable it by
-// default on Android ARM64.
-#if defined(__native_client__) ||                      \
-    (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
-    (defined(__ANDROID__) && defined(__aarch64__))
-#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
-#else
-#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
-#endif
-
-// UnscaledCycleClock is an optional internal feature.
-// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
-// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
-#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
-#define ABSL_USE_UNSCALED_CYCLECLOCK               \
-  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
-   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
-#endif
+#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock_config.h"
 
 #if ABSL_USE_UNSCALED_CYCLECLOCK
 
-// This macro can be used to test if UnscaledCycleClock::Frequency()
-// is NominalCPUFrequency() on a particular platform.
-#if  (defined(__i386__) || defined(__x86_64__) || \
-      defined(_M_IX86) || defined(_M_X64))
-#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-#endif
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace time_internal {
@@ -115,6 +77,16 @@
   friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
 };
 
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return static_cast<int64_t>((high << 32) | low);
+}
+
+#endif
+
 }  // namespace base_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h b/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h
new file mode 100644
index 0000000..24b324a
--- /dev/null
+++ b/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h
@@ -0,0 +1,62 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__ppc__) || defined(__riscv) ||     \
+    defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || (defined(__APPLE__)) || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
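
The header is meant to be tested with "#if ABSL_USE_UNSCALED_CYCLECLOCK", as noted above. A sketch of gating on it (the rdtsc read mirrors the x86-64 implementation earlier in this patch; the function name and the steady_clock fallback are illustrative):

#include <chrono>
#include <cstdint>

#include "absl/base/internal/unscaledcycleclock_config.h"

int64_t ReadCycleSource() {
#if ABSL_USE_UNSCALED_CYCLECLOCK && defined(__x86_64__)
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return static_cast<int64_t>((high << 32) | low);
#else
  // No usable hardware counter in this configuration; fall back to a clock.
  return std::chrono::steady_clock::now().time_since_epoch().count();
#endif
}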
diff --git a/abseil-cpp/absl/base/invoke_test.cc b/abseil-cpp/absl/base/invoke_test.cc
index bcdef36..7be26f6 100644
--- a/abseil-cpp/absl/base/invoke_test.cc
+++ b/abseil-cpp/absl/base/invoke_test.cc
@@ -31,6 +31,14 @@
 
 int Function(int a, int b) { return a - b; }
 
+void VoidFunction(int& a, int& b) {
+  a += b;
+  b = a - b;
+  a -= b;
+}
+
+int ZeroArgFunction() { return -1937; }
+
 int Sink(std::unique_ptr<int> p) {
   return *p;
 }
@@ -223,6 +231,100 @@
   EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42));
 }
 
+TEST(IsInvocableRTest, CallableExactMatch) {
+  static_assert(
+      base_internal::is_invocable_r<int, decltype(Function), int, int>::value,
+      "Should be true for exact match of types on a free function");
+}
+
+TEST(IsInvocableRTest, CallableArgumentConversionMatch) {
+  static_assert(
+      base_internal::is_invocable_r<int, decltype(Function), char, int>::value,
+      "Should be true for convertible argument type");
+}
+
+TEST(IsInvocableRTest, CallableReturnConversionMatch) {
+  static_assert(base_internal::is_invocable_r<double, decltype(Function), int,
+                                              int>::value,
+                "Should be true for convertible return type");
+}
+
+TEST(IsInvocableRTest, CallableReturnVoid) {
+  static_assert(base_internal::is_invocable_r<void, decltype(VoidFunction),
+                                              int&, int&>::value,
+                "Should be true for void expected and actual return types");
+  static_assert(
+      base_internal::is_invocable_r<void, decltype(Function), int, int>::value,
+      "Should be true for void expected and non-void actual return types");
+}
+
+TEST(IsInvocableRTest, CallableRefQualifierMismatch) {
+  static_assert(!base_internal::is_invocable_r<void, decltype(VoidFunction),
+                                               int&, const int&>::value,
+                "Should be false for reference constness mismatch");
+  static_assert(!base_internal::is_invocable_r<void, decltype(VoidFunction),
+                                               int&&, int&>::value,
+                "Should be false for reference value category mismatch");
+}
+
+TEST(IsInvocableRTest, CallableArgumentTypeMismatch) {
+  static_assert(!base_internal::is_invocable_r<int, decltype(Function),
+                                               std::string, int>::value,
+                "Should be false for argument type mismatch");
+}
+
+TEST(IsInvocableRTest, CallableReturnTypeMismatch) {
+  static_assert(!base_internal::is_invocable_r<std::string, decltype(Function),
+                                               int, int>::value,
+                "Should be false for return type mismatch");
+}
+
+TEST(IsInvocableRTest, CallableTooFewArgs) {
+  static_assert(
+      !base_internal::is_invocable_r<int, decltype(Function), int>::value,
+      "Should be false for too few arguments");
+}
+
+TEST(IsInvocableRTest, CallableTooManyArgs) {
+  static_assert(!base_internal::is_invocable_r<int, decltype(Function), int,
+                                               int, int>::value,
+                "Should be false for too many arguments");
+}
+
+TEST(IsInvocableRTest, MemberFunctionAndReference) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::Method),
+                                              Class&, int, int>::value,
+                "Should be true for exact match of types on a member function "
+                "and class reference");
+}
+
+TEST(IsInvocableRTest, MemberFunctionAndPointer) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::Method),
+                                              Class*, int, int>::value,
+                "Should be true for exact match of types on a member function "
+                "and class pointer");
+}
+
+TEST(IsInvocableRTest, DataMemberAndReference) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::member),
+                                              Class&>::value,
+                "Should be true for exact match of types on a data member and "
+                "class reference");
+}
+
+TEST(IsInvocableRTest, DataMemberAndPointer) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::member),
+                                              Class*>::value,
+                "Should be true for exact match of types on a data member and "
+                "class pointer");
+}
+
+TEST(IsInvocableRTest, CallableZeroArgs) {
+  static_assert(
+      base_internal::is_invocable_r<int, decltype(ZeroArgFunction)>::value,
+      "Should be true for exact match for a zero-arg free function");
+}
+
 }  // namespace
 }  // namespace base_internal
 ABSL_NAMESPACE_END
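
The new IsInvocableRTest cases exercise base_internal::is_invocable_r, a C++11 counterpart of the C++17 std::is_invocable_r trait. The same checks expressed with the standard trait (assumes C++17; Subtract is an illustrative stand-in for Function):

#include <string>
#include <type_traits>

int Subtract(int a, int b) { return a - b; }

static_assert(std::is_invocable_r_v<int, decltype(Subtract), int, int>,
              "exact match");
static_assert(std::is_invocable_r_v<double, decltype(Subtract), char, int>,
              "convertible argument and return types");
static_assert(!std::is_invocable_r_v<std::string, decltype(Subtract), int, int>,
              "return type mismatch");
static_assert(!std::is_invocable_r_v<int, decltype(Subtract), int>,
              "too few arguments");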
diff --git a/abseil-cpp/absl/base/log_severity.cc b/abseil-cpp/absl/base/log_severity.cc
index 72312af..60a8fc1 100644
--- a/abseil-cpp/absl/base/log_severity.cc
+++ b/abseil-cpp/absl/base/log_severity.cc
@@ -16,6 +16,8 @@
 
 #include <ostream>
 
+#include "absl/base/attributes.h"
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
@@ -23,5 +25,31 @@
   if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s);
   return os << "absl::LogSeverity(" << static_cast<int>(s) << ")";
 }
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) {
+  switch (s) {
+    case absl::LogSeverityAtLeast::kInfo:
+    case absl::LogSeverityAtLeast::kWarning:
+    case absl::LogSeverityAtLeast::kError:
+    case absl::LogSeverityAtLeast::kFatal:
+      return os << ">=" << static_cast<absl::LogSeverity>(s);
+    case absl::LogSeverityAtLeast::kInfinity:
+      return os << "INFINITY";
+  }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) {
+  switch (s) {
+    case absl::LogSeverityAtMost::kInfo:
+    case absl::LogSeverityAtMost::kWarning:
+    case absl::LogSeverityAtMost::kError:
+    case absl::LogSeverityAtMost::kFatal:
+      return os << "<=" << static_cast<absl::LogSeverity>(s);
+    case absl::LogSeverityAtMost::kNegativeInfinity:
+      return os << "NEGATIVE_INFINITY";
+  }
+  return os;
+}
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/base/log_severity.h b/abseil-cpp/absl/base/log_severity.h
index 65a3b16..8bdca38 100644
--- a/abseil-cpp/absl/base/log_severity.h
+++ b/abseil-cpp/absl/base/log_severity.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
-#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
+#ifndef ABSL_BASE_LOG_SEVERITY_H_
+#define ABSL_BASE_LOG_SEVERITY_H_
 
 #include <array>
 #include <ostream>
@@ -36,7 +36,7 @@
 // such values to a defined severity level, however in some cases values other
 // than the defined levels are useful for comparison.
 //
-// Exmaple:
+// Example:
 //
 //   // Effectively disables all logging:
 //   SetMinLogLevel(static_cast<absl::LogSeverity>(100));
@@ -115,7 +115,58 @@
 // unspecified; do not rely on it.
 std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
 
+// Enums representing a lower bound for LogSeverity. APIs that only operate on
+// messages of at least a certain level (for example, `SetMinLogLevel()`) use
+// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is
+// a level above all threshold levels and therefore no log message will
+// ever meet this threshold.
+enum class LogSeverityAtLeast : int {
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+  kInfinity = 1000,
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);
+
+// Enums representing an upper bound for LogSeverity. APIs that only operate on
+// messages of at most a certain level (for example, buffer all messages at or
+// below a certain level) use this type to specify that level.
+// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
+// levels and therefore will exclude all log messages.
+enum class LogSeverityAtMost : int {
+  kNegativeInfinity = -1000,
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);
+
+#define COMPOP(op1, op2, T)                                         \
+  constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \
+    return static_cast<absl::LogSeverity>(lhs) op1 rhs;             \
+  }                                                                 \
+  constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \
+    return lhs op2 static_cast<absl::LogSeverity>(rhs);             \
+  }
+
+// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
+// `LogSeverityAtMost` are only supported in one direction.
+// Valid checks are:
+//   LogSeverity >= LogSeverityAtLeast
+//   LogSeverity < LogSeverityAtLeast
+//   LogSeverity <= LogSeverityAtMost
+//   LogSeverity > LogSeverityAtMost
+COMPOP(>, <, LogSeverityAtLeast)
+COMPOP(<=, >=, LogSeverityAtLeast)
+COMPOP(<, >, LogSeverityAtMost)
+COMPOP(>=, <=, LogSeverityAtMost)
+#undef COMPOP
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
+#endif  // ABSL_BASE_LOG_SEVERITY_H_
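
As documented above, the comparisons between `LogSeverity` and the threshold enums are deliberately one-directional. A small usage sketch (ShouldLog and its parameters are illustrative, not an Abseil API):

#include "absl/base/log_severity.h"

bool ShouldLog(absl::LogSeverity severity, absl::LogSeverityAtLeast threshold) {
  // Valid direction per the table above: LogSeverity >= LogSeverityAtLeast.
  // With threshold == kInfinity this is never true, disabling logging.
  return severity >= threshold;
}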
diff --git a/abseil-cpp/absl/base/log_severity_test.cc b/abseil-cpp/absl/base/log_severity_test.cc
index 2c6872b..16091a5 100644
--- a/abseil-cpp/absl/base/log_severity_test.cc
+++ b/abseil-cpp/absl/base/log_severity_test.cc
@@ -35,7 +35,8 @@
 using ::testing::TestWithParam;
 using ::testing::Values;
 
-std::string StreamHelper(absl::LogSeverity value) {
+template <typename T>
+std::string StreamHelper(T value) {
   std::ostringstream stream;
   stream << value;
   return stream.str();
@@ -52,9 +53,9 @@
               Eq("absl::LogSeverity(4)"));
 }
 
-static_assert(
-    absl::flags_internal::FlagUseOneWordStorage<absl::LogSeverity>::value,
-    "Flags of type absl::LogSeverity ought to be lock-free.");
+static_assert(absl::flags_internal::FlagUseValueAndInitBitStorage<
+                  absl::LogSeverity>::value,
+              "Flags of type absl::LogSeverity ought to be lock-free.");
 
 using ParseFlagFromOutOfRangeIntegerTest = TestWithParam<int64_t>;
 INSTANTIATE_TEST_SUITE_P(
@@ -201,4 +202,44 @@
               IsTrue());
   EXPECT_THAT(reparsed_value, Eq(to_unparse));
 }
+
+TEST(LogThresholdTest, LogSeverityAtLeastTest) {
+  EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal);
+  EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo);
+
+  EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError);
+  EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo);
+}
+
+TEST(LogThresholdTest, LogSeverityAtMostTest) {
+  EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning);
+  EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal);
+
+  EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError);
+  EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError);
+}
+
+TEST(LogThresholdTest, Extremes) {
+  EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity);
+  EXPECT_GT(absl::LogSeverity::kInfo,
+            absl::LogSeverityAtMost::kNegativeInfinity);
+}
+
+TEST(LogThresholdTest, Output) {
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning),
+              Eq(">=WARNING"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity),
+              Eq("INFINITY"));
+
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity),
+              Eq("NEGATIVE_INFINITY"));
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/base/macros.h b/abseil-cpp/absl/base/macros.h
index 02dd9ff..f33cd19 100644
--- a/abseil-cpp/absl/base/macros.h
+++ b/abseil-cpp/absl/base/macros.h
@@ -103,17 +103,11 @@
 // aborts the program in release mode (when NDEBUG is defined). The
 // implementation should abort the program as quickly as possible and ideally it
 // should not be possible to ignore the abort request.
-#if (ABSL_HAVE_BUILTIN(__builtin_trap) &&         \
-     ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \
-    (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_INTERNAL_HARDENING_ABORT() \
-  do {                                  \
-    __builtin_trap();                   \
-    __builtin_unreachable();            \
+#define ABSL_INTERNAL_HARDENING_ABORT()   \
+  do {                                    \
+    ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \
+    ABSL_INTERNAL_UNREACHABLE_IMPL();     \
   } while (false)
-#else
-#define ABSL_INTERNAL_HARDENING_ABORT() abort()
-#endif
 
 // ABSL_HARDENING_ASSERT()
 //
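
The rewritten abort macro ultimately backs `ABSL_HARDENING_ASSERT()`, documented just below in the unchanged portion of this header. A minimal sketch of the intended usage pattern follows; `SmallBuffer` and its members are illustrative, not Abseil APIs.

    #include <cstddef>
    #include <iostream>

    #include "absl/base/macros.h"

    // A toy fixed-size buffer whose accessor validates its index with a
    // hardening assert. With ABSL_OPTION_HARDENED enabled in release builds the
    // check aborts via ABSL_INTERNAL_HARDENING_ABORT(); otherwise it behaves
    // like a regular assert() in debug builds.
    class SmallBuffer {
     public:
      char& at(size_t i) {
        ABSL_HARDENING_ASSERT(i < kSize);
        return data_[i];
      }

     private:
      static constexpr size_t kSize = 16;
      char data_[kSize] = {};
    };

    int main() {
      SmallBuffer buf;
      buf.at(3) = 'x';
      std::cout << buf.at(3) << "\n";
    }
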
diff --git a/abseil-cpp/absl/base/nullability.h b/abseil-cpp/absl/base/nullability.h
new file mode 100644
index 0000000..6f49b6f
--- /dev/null
+++ b/abseil-cpp/absl/base/nullability.h
@@ -0,0 +1,224 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: nullability.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of "templated annotations" for designating the
+// expected nullability of pointers. These annotations allow you to designate
+// pointers in one of three classification states:
+//
+//  * "Non-null" (for pointers annotated `Nonnull<T>`), indicating that it is
+//    invalid for the given pointer to ever be null.
+//  * "Nullable" (for pointers annotated `Nullable<T>`), indicating that it is
+//    valid for the given pointer to be null.
+//  * "Unknown" (for pointers annotated `NullabilityUnknown<T>`), indicating
+//    that the given pointer has not been yet classified as either nullable or
+//    non-null. This is the default state of unannotated pointers.
+//
+// NOTE: unannotated pointers implicitly bear the annotation
+// `NullabilityUnknown<T>`; you should rarely, if ever, see this annotation used
+// in the codebase explicitly.
+//
+// -----------------------------------------------------------------------------
+// Nullability and Contracts
+// -----------------------------------------------------------------------------
+//
+// These nullability annotations allow you to more clearly specify contracts on
+// software components by narrowing the *preconditions*, *postconditions*, and
+// *invariants* of pointer state(s) in any given interface. It then depends on
+// context who is responsible for fulfilling the annotation's requirements.
+//
+// For example, a function may receive a pointer argument. Designating that
+// pointer argument as "non-null" tightens the precondition of the contract of
+// that function. It is then the responsibility of anyone calling such a
+// function to ensure that the passed pointer is not null.
+//
+// Similarly, a function may have a pointer as a return value. Designating that
+// return value as "non-null" tightens the postcondition of the contract of that
+// function. In this case, however, it is the responsibility of the function
+// itself to ensure that the returned pointer is not null.
+//
+// Clearly defining these contracts allows providers (and consumers) of such
+// pointers to have more confidence in their null state. If a function declares
+// a return value as "non-null", for example, the caller should not need to
+// check whether the returned value is `nullptr`; it can simply assume the
+// pointer is valid.
+//
+// Of course most interfaces already have expectations on the nullability state
+// of pointers, and these expectations are, in effect, a contract; often,
+// however, those contracts are either poorly or partially specified, assumed,
+// or misunderstood. These nullability annotations are designed to allow you to
+// formalize those contracts within the codebase.
+//
+// -----------------------------------------------------------------------------
+// Using Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// It is important to note that these annotations are not distinct strong
+// *types*. They are alias templates defined to be equal to the underlying
+// pointer type. A pointer annotated `Nonnull<T*>`, for example, is simply a
+// pointer of type `T*`. Each annotation acts as a form of documentation about
+// the contract for the given pointer. Each annotation requires providers or
+// consumers of these pointers across API boundaries to take appropriate steps
+// when setting or using these pointers:
+//
+// * "Non-null" pointers should never be null. It is the responsibility of the
+//   provider of this pointer to ensure that the pointer may never be set to
+//   null. Consumers of such pointers can treat such pointers as non-null.
+// * "Nullable" pointers may or may not be null. Consumers of such pointers
+//   should precede any usage of that pointer (e.g. a dereference operation)
+//   with a `nullptr` check.
+// * "Unknown" pointers may be either "non-null" or "nullable" but have not been
+//   definitively determined to be in either classification state. Providers of
+//   such pointers across API boundaries should determine, over time, how to
+//   annotate the pointer in either of the above two states. Consumers of such
+//   pointers across an API boundary should continue to treat such pointers as
+//   they currently do.
+//
+// Example:
+//
+// // PaySalary() requires the passed pointer to an `Employee` to be non-null.
+// void PaySalary(absl::Nonnull<Employee *> e) {
+//   pay(e->salary);  // OK to dereference
+// }
+//
+// // CompleteTransaction() guarantees the returned pointer to an `Account` to
+// // be non-null.
+// absl::Nonnull<Account *> CompleteTransaction(double fee) {
+// ...
+// }
+//
+// // Note that specifying a nullability annotation does not prevent someone
+// // from violating the contract:
+//
+// Nullable<Employee *> find(Map& employees, std::string_view name);
+//
+// void g(Map& employees) {
+//   Employee *e = find(employees, "Pat");
+//   // `e` can now be null.
+//   PaySalary(e); // Violates contract, but compiles!
+// }
+//
+// Nullability annotations, in other words, are useful for defining and
+// narrowing contracts; *enforcement* of those contracts depends on use and any
+// additional (static or dynamic analysis) tooling.
+//
+// NOTE: The "unknown" annotation state indicates that a pointer's contract has
+// not yet been positively identified. The unknown state therefore acts as a
+// form of documentation of your technical debt, and a codebase that adopts
+// nullability annotations should aspire to annotate every pointer as either
+// "non-null" or "nullable".
+//
+// -----------------------------------------------------------------------------
+// Applicability of Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// By default, nullability annotations are applicable to raw and smart
+// pointers. User-defined types can indicate compatibility with nullability
+// annotations by providing an `absl_nullability_compatible` nested type. The
+// actual definition of this inner type is not relevant as it is used merely as
+// a marker. It is common to use a using declaration of
+// `absl_nullability_compatible` set to void.
+//
+// // Example:
+// struct MyPtr {
+//   using absl_nullability_compatible = void;
+//   ...
+// };
+//
+// DISCLAIMER:
+// ===========================================================================
+// These nullability annotations are primarily a human readable signal about the
+// intended contract of the pointer. They are not *types* and do not currently
+// provide any correctness guarantees. For example, a pointer annotated as
+// `Nonnull<T*>` is *not guaranteed* to be non-null, and the compiler won't
+// alert or prevent assignment of a `Nullable<T*>` to a `Nonnull<T*>`.
+// ===========================================================================
+#ifndef ABSL_BASE_NULLABILITY_H_
+#define ABSL_BASE_NULLABILITY_H_
+
+#include "absl/base/internal/nullability_impl.h"
+
+namespace absl {
+
+// absl::Nonnull
+//
+// The indicated pointer is never null. It is the responsibility of the provider
+// of this pointer across an API boundary to ensure that the pointer is never
+// set to null. Consumers of this pointer across an API boundary may safely
+// dereference the pointer.
+//
+// Example:
+//
+// // `employee` is designated as not null.
+// void PaySalary(absl::Nonnull<Employee *> employee) {
+//   pay(*employee);  // OK to dereference
+// }
+template <typename T>
+using Nonnull = nullability_internal::NonnullImpl<T>;
+
+// absl::Nullable
+//
+// The indicated pointer may, by design, be either null or non-null. Consumers
+// of this pointer across an API boundary should perform a `nullptr` check
+// before performing any operation using the pointer.
+//
+// Example:
+//
+// // `employee` may be null.
+// void PaySalary(absl::Nullable<Employee *> employee) {
+//   if (employee != nullptr) {
+//     Pay(*employee);  // OK to dereference
+//   }
+// }
+template <typename T>
+using Nullable = nullability_internal::NullableImpl<T>;
+
+// absl::NullabilityUnknown (default)
+//
+// The indicated pointer has not yet been determined to be definitively
+// "non-null" or "nullable." Providers of such pointers across API boundaries
+// should, over time, annotate such pointers as either "non-null" or "nullable."
+// Consumers of these pointers across an API boundary should treat such pointers
+// with the same caution they treat currently unannotated pointers. Most
+// existing code will have "unknown" pointers, which should eventually be
+// migrated into one of the above two nullability states: `Nonnull<T>` or
+// `Nullable<T>`.
+//
+// NOTE: Because this annotation is the global default state, pointers without
+// any annotation are assumed to have "unknown" semantics. This assumption is
+// designed to minimize churn and reduce clutter within the codebase.
+//
+// Example:
+//
+// // `employee`'s nullability state is unknown.
+// void PaySalary(absl::NullabilityUnknown<Employee *> employee) {
+//   Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+//
+// Note that a pointer without an annotation, by default, is assumed to have the
+// annotation `NullabilityUnknown`.
+//
+// // `employee`'s nullability state is unknown.
+// void PaySalary(Employee* employee) {
+//   Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+template <typename T>
+using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_NULLABILITY_H_
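
Because the aliases are transparent, annotated and unannotated signatures are interchangeable at the type level; the value is in documenting the contract. A small self-contained sketch follows, assuming only the header added above; `Employee`, `Registry`, `Find`, and `PrintSalary` are illustrative names rather than Abseil APIs.

    #include <iostream>
    #include <map>
    #include <string>

    #include "absl/base/nullability.h"

    struct Employee {
      int salary = 0;
    };

    using Registry = std::map<std::string, Employee>;

    // May return null when `name` is absent; callers must check before use.
    absl::Nullable<const Employee*> Find(const Registry& r,
                                         const std::string& name) {
      auto it = r.find(name);
      return it == r.end() ? nullptr : &it->second;
    }

    // Requires a non-null pointer; the caller is responsible for the check.
    void PrintSalary(absl::Nonnull<const Employee*> e) {
      std::cout << e->salary << "\n";
    }

    int main() {
      Registry registry = {{"Pat", Employee{100}}};
      if (const Employee* e = Find(registry, "Pat")) {
        PrintSalary(e);  // The null check above upholds PrintSalary's contract.
      }
    }
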
diff --git a/abseil-cpp/absl/base/nullability_test.cc b/abseil-cpp/absl/base/nullability_test.cc
new file mode 100644
index 0000000..028ea6c
--- /dev/null
+++ b/abseil-cpp/absl/base/nullability_test.cc
@@ -0,0 +1,129 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/nullability.h"
+
+#include <cassert>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+
+namespace {
+using ::absl::Nonnull;
+using ::absl::NullabilityUnknown;
+using ::absl::Nullable;
+
+void funcWithNonnullArg(Nonnull<int*> /*arg*/) {}
+template <typename T>
+void funcWithDeducedNonnullArg(Nonnull<T*> /*arg*/) {}
+
+TEST(NonnullTest, NonnullArgument) {
+  int var = 0;
+  funcWithNonnullArg(&var);
+  funcWithDeducedNonnullArg(&var);
+}
+
+Nonnull<int*> funcWithNonnullReturn() {
+  static int var = 0;
+  return &var;
+}
+
+TEST(NonnullTest, NonnullReturn) {
+  auto var = funcWithNonnullReturn();
+  (void)var;
+}
+
+TEST(PassThroughTest, PassesThroughRawPointerToInt) {
+  EXPECT_TRUE((std::is_same<Nonnull<int*>, int*>::value));
+  EXPECT_TRUE((std::is_same<Nullable<int*>, int*>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<int*>, int*>::value));
+}
+
+TEST(PassThroughTest, PassesThroughRawPointerToVoid) {
+  EXPECT_TRUE((std::is_same<Nonnull<void*>, void*>::value));
+  EXPECT_TRUE((std::is_same<Nullable<void*>, void*>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<void*>, void*>::value));
+}
+
+TEST(PassThroughTest, PassesThroughUniquePointerToInt) {
+  using T = std::unique_ptr<int>;
+  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughSharedPointerToInt) {
+  using T = std::shared_ptr<int>;
+  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughSharedPointerToVoid) {
+  using T = std::shared_ptr<void>;
+  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughPointerToMemberObject) {
+  using T = decltype(&std::pair<int, int>::first);
+  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughPointerToMemberFunction) {
+  using T = decltype(&std::unique_ptr<int>::reset);
+  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+}  // namespace
+
+// Nullable ADL lookup test
+namespace util {
+// Helper for NullableAdlTest.  Returns true, denoting that argument-dependent
+// lookup found this implementation of DidAdlWin.  Must be in namespace
+// util itself, not a nested anonymous namespace.
+template <typename T>
+bool DidAdlWin(T*) {
+  return true;
+}
+
+// Because this type is defined in namespace util, an unqualified call to
+// DidAdlWin with a pointer to MakeAdlWin will find the above implementation.
+struct MakeAdlWin {};
+}  // namespace util
+
+namespace {
+// Returns false, denoting that ADL did not inspect namespace util.  If it
+// had, the better match (T*) above would have won out over the (...) here.
+bool DidAdlWin(...) { return false; }
+
+TEST(NullableAdlTest, NullableAddsNothingToArgumentDependentLookup) {
+  // Treatment: absl::Nullable<int*> contributes nothing to ADL because
+  // int* itself doesn't.
+  EXPECT_FALSE(DidAdlWin((int*)nullptr));
+  EXPECT_FALSE(DidAdlWin((Nullable<int*>)nullptr));
+
+  // Control: Argument-dependent lookup does find the implementation in
+  // namespace util when the underlying pointee type resides there.
+  EXPECT_TRUE(DidAdlWin((util::MakeAdlWin*)nullptr));
+  EXPECT_TRUE(DidAdlWin((Nullable<util::MakeAdlWin*>)nullptr));
+}
+}  // namespace
diff --git a/abseil-cpp/absl/base/optimization.h b/abseil-cpp/absl/base/optimization.h
index 2e31376..ad0121a 100644
--- a/abseil-cpp/absl/base/optimization.h
+++ b/abseil-cpp/absl/base/optimization.h
@@ -22,13 +22,15 @@
 #ifndef ABSL_BASE_OPTIMIZATION_H_
 #define ABSL_BASE_OPTIMIZATION_H_
 
+#include <assert.h>
+
 #include "absl/base/config.h"
 
 // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
 //
-// Instructs the compiler to avoid optimizing tail-call recursion. Use of this
-// macro is useful when you wish to preserve the existing function order within
-// a stack trace for logging, debugging, or profiling purposes.
+// Instructs the compiler to avoid optimizing tail-call recursion. This macro is
+// useful when you wish to preserve the existing function order within a stack
+// trace for logging, debugging, or profiling purposes.
 //
 // Example:
 //
@@ -89,6 +91,7 @@
 #define ABSL_CACHELINE_SIZE 64
 #endif
 #endif
+#endif
 
 #ifndef ABSL_CACHELINE_SIZE
 // A reasonable default guess.  Note that overestimates tend to waste more
@@ -104,9 +107,10 @@
 // Cacheline aligning objects properly allows constructive memory sharing and
 // prevents destructive (or "false") memory sharing.
 //
-// NOTE: this macro should be replaced with usage of `alignas()` using
+// NOTE: callers should replace uses of this macro with `alignas()` using
 // `std::hardware_constructive_interference_size` and/or
-// `std::hardware_destructive_interference_size` when available within C++17.
+// `std::hardware_destructive_interference_size` when C++17 becomes available to
+// them.
 //
 // See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
 // for more information.
@@ -138,12 +142,11 @@
 //    the generated machine code.
 // 3) Prefer applying this attribute to individual variables. Avoid
 //    applying it to types. This tends to localize the effect.
+#if defined(__clang__) || defined(__GNUC__)
 #define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
 #elif defined(_MSC_VER)
-#define ABSL_CACHELINE_SIZE 64
 #define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
 #else
-#define ABSL_CACHELINE_SIZE 64
 #define ABSL_CACHELINE_ALIGNED
 #endif
 
@@ -178,45 +181,105 @@
 #define ABSL_PREDICT_TRUE(x) (x)
 #endif
 
-// ABSL_INTERNAL_ASSUME(cond)
-// Informs the compiler than a condition is always true and that it can assume
-// it to be true for optimization purposes. The call has undefined behavior if
-// the condition is false.
+// `ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL()` aborts the program in the fastest
+// possible way, with no attempt at logging. One use is to implement hardening
+// aborts with ABSL_OPTION_HARDENED.  Since this is an internal symbol, it
+// should not be used directly outside of Abseil.
+#if ABSL_HAVE_BUILTIN(__builtin_trap) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL() __builtin_trap()
+#else
+#define ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL() abort()
+#endif
+
+// `ABSL_INTERNAL_UNREACHABLE_IMPL()` is the platform specific directive to
+// indicate that a statement is unreachable, and to allow the compiler to
+// optimize accordingly. Clients should use `ABSL_UNREACHABLE()`, which is
+// defined below.
+#if defined(__cpp_lib_unreachable) && __cpp_lib_unreachable >= 202202L
+#define ABSL_INTERNAL_UNREACHABLE_IMPL() std::unreachable()
+#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
+#define ABSL_INTERNAL_UNREACHABLE_IMPL() __builtin_unreachable()
+#elif ABSL_HAVE_BUILTIN(__builtin_assume)
+#define ABSL_INTERNAL_UNREACHABLE_IMPL() __builtin_assume(false)
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_UNREACHABLE_IMPL() __assume(false)
+#else
+#define ABSL_INTERNAL_UNREACHABLE_IMPL()
+#endif
+
+// `ABSL_UNREACHABLE()` is an unreachable statement.  A program which reaches
+// one has undefined behavior, and the compiler may optimize accordingly.
+#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
+// Abort in hardened mode to avoid dangerous undefined behavior.
+#define ABSL_UNREACHABLE()                \
+  do {                                    \
+    ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \
+    ABSL_INTERNAL_UNREACHABLE_IMPL();     \
+  } while (false)
+#else
+// The assert only fires in debug mode to aid in debugging.
+// When NDEBUG is defined, reaching ABSL_UNREACHABLE() is undefined behavior.
+#define ABSL_UNREACHABLE()                       \
+  do {                                           \
+    /* NOLINTNEXTLINE: misc-static-assert */     \
+    assert(false && "ABSL_UNREACHABLE reached"); \
+    ABSL_INTERNAL_UNREACHABLE_IMPL();            \
+  } while (false)
+#endif
+
+// ABSL_ASSUME(cond)
+//
+// Informs the compiler that a condition is always true and that it can assume
+// it to be true for optimization purposes.
+//
+// WARNING: If the condition is false, the program can produce undefined and
+// potentially dangerous behavior.
+//
 // In !NDEBUG mode, the condition is checked with an assert().
-// NOTE: The expression must not have side effects, as it will only be evaluated
-// in some compilation modes and not others.
+//
+// NOTE: The expression must not have side effects, as it may only be evaluated
+// in some compilation modes and not others. Some compilers may issue a warning
+// if the compiler cannot prove the expression has no side effects. For example,
+// the expression should not use a function call since the compiler cannot prove
+// that a function call does not have side effects.
 //
 // Example:
 //
 //   int x = ...;
-//   ABSL_INTERNAL_ASSUME(x >= 0);
+//   ABSL_ASSUME(x >= 0);
 //   // The compiler can optimize the division to a simple right shift using the
 //   // assumption specified above.
 //   int y = x / 16;
 //
 #if !defined(NDEBUG)
-#define ABSL_INTERNAL_ASSUME(cond) assert(cond)
+#define ABSL_ASSUME(cond) assert(cond)
 #elif ABSL_HAVE_BUILTIN(__builtin_assume)
-#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond)
+#define ABSL_ASSUME(cond) __builtin_assume(cond)
+#elif defined(_MSC_VER)
+#define ABSL_ASSUME(cond) __assume(cond)
+#elif defined(__cpp_lib_unreachable) && __cpp_lib_unreachable >= 202202L
+#define ABSL_ASSUME(cond)            \
+  do {                               \
+    if (!(cond)) std::unreachable(); \
+  } while (false)
 #elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
-#define ABSL_INTERNAL_ASSUME(cond)        \
+#define ABSL_ASSUME(cond)                 \
   do {                                    \
     if (!(cond)) __builtin_unreachable(); \
-  } while (0)
-#elif defined(_MSC_VER)
-#define ABSL_INTERNAL_ASSUME(cond) __assume(cond)
+  } while (false)
 #else
-#define ABSL_INTERNAL_ASSUME(cond)      \
+#define ABSL_ASSUME(cond)               \
   do {                                  \
     static_cast<void>(false && (cond)); \
-  } while (0)
+  } while (false)
 #endif
 
 // ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond)
 // This macro forces a small unique name on static file-level symbols like
 // static local variables or static functions. This is intended to be used in
 // macro definitions to optimize the cost of generated code. Do NOT use it on
-// symbols exported from translation unit since it may casue a link time
+// symbols exported from translation unit since it may cause a link time
 // conflict.
 //
 // Example:
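
The newly public `ABSL_ASSUME()` and `ABSL_UNREACHABLE()` are easiest to see together. A minimal sketch assuming only this header; `Mode`, `ModeName`, and `DivideBy16` are illustrative names.

    #include <iostream>

    #include "absl/base/optimization.h"

    enum class Mode { kRead, kWrite };

    // ABSL_UNREACHABLE() marks the impossible fall-through after an exhaustive
    // switch; in hardened release builds it aborts rather than invoking
    // undefined behavior.
    const char* ModeName(Mode m) {
      switch (m) {
        case Mode::kRead:
          return "read";
        case Mode::kWrite:
          return "write";
      }
      ABSL_UNREACHABLE();
    }

    // ABSL_ASSUME() tells the optimizer `x` is non-negative, so the division
    // can be lowered to a plain right shift, as the comment above describes.
    int DivideBy16(int x) {
      ABSL_ASSUME(x >= 0);
      return x / 16;
    }

    int main() {
      std::cout << ModeName(Mode::kWrite) << " " << DivideBy16(64) << "\n";
    }
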
diff --git a/abseil-cpp/absl/base/options.h b/abseil-cpp/absl/base/options.h
index 6a0fb07..09b98ee 100644
--- a/abseil-cpp/absl/base/options.h
+++ b/abseil-cpp/absl/base/options.h
@@ -67,12 +67,6 @@
 #ifndef ABSL_BASE_OPTIONS_H_
 #define ABSL_BASE_OPTIONS_H_
 
-// Include a standard library header to allow configuration based on the
-// standard library in use.
-#ifdef __cplusplus
-#include <ciso646>
-#endif
-
 // -----------------------------------------------------------------------------
 // Type Compatibility Options
 // -----------------------------------------------------------------------------
@@ -100,7 +94,7 @@
 // User code should not inspect this macro.  To check in the preprocessor if
 // absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY.
 
-#define ABSL_OPTION_USE_STD_ANY 2
+#define ABSL_OPTION_USE_STD_ANY 1
 
 
 // ABSL_OPTION_USE_STD_OPTIONAL
@@ -127,7 +121,7 @@
 // absl::optional is a typedef of std::optional, use the feature macro
 // ABSL_USES_STD_OPTIONAL.
 
-#define ABSL_OPTION_USE_STD_OPTIONAL 2
+#define ABSL_OPTION_USE_STD_OPTIONAL 1
 
 
 // ABSL_OPTION_USE_STD_STRING_VIEW
@@ -154,7 +148,7 @@
 // absl::string_view is a typedef of std::string_view, use the feature macro
 // ABSL_USES_STD_STRING_VIEW.
 
-#define ABSL_OPTION_USE_STD_STRING_VIEW 2
+#define ABSL_OPTION_USE_STD_STRING_VIEW 1
 
 // ABSL_OPTION_USE_STD_VARIANT
 //
@@ -180,7 +174,7 @@
 // absl::variant is a typedef of std::variant, use the feature macro
 // ABSL_USES_STD_VARIANT.
 
-#define ABSL_OPTION_USE_STD_VARIANT 2
+#define ABSL_OPTION_USE_STD_VARIANT 1
 
 
 // ABSL_OPTION_USE_INLINE_NAMESPACE
@@ -206,7 +200,7 @@
 // allowed.
 
 #define ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_2020_09_23
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20230802
 
 // ABSL_OPTION_HARDENED
 //
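
As the option comments note, user code should not inspect the `ABSL_OPTION_*` macros directly; the `ABSL_USES_STD_*` feature macros are the supported way to detect the effect of these settings. A small sketch using the string_view feature macro, assuming a normal Abseil build where `absl/strings/string_view.h` is available:

    #include <iostream>

    #include "absl/base/config.h"
    #include "absl/strings/string_view.h"

    int main() {
      // With ABSL_OPTION_USE_STD_STRING_VIEW set to 1 (which requires C++17),
      // absl::string_view is an alias of std::string_view and the feature
      // macro below is defined by Abseil's config.
    #ifdef ABSL_USES_STD_STRING_VIEW
      std::cout << "absl::string_view is std::string_view\n";
    #else
      std::cout << "absl::string_view is Abseil's own implementation\n";
    #endif
      absl::string_view sv = "hello";
      std::cout << sv.size() << "\n";
    }
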
diff --git a/abseil-cpp/absl/base/policy_checks.h b/abseil-cpp/absl/base/policy_checks.h
index 06b3243..372e848 100644
--- a/abseil-cpp/absl/base/policy_checks.h
+++ b/abseil-cpp/absl/base/policy_checks.h
@@ -44,17 +44,17 @@
 // Toolchain Check
 // -----------------------------------------------------------------------------
 
-// We support MSVC++ 14.0 update 2 and later.
+// We support Visual Studio 2019 (MSVC++ 16.0) and later.
 // This minimum will go up.
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__)
-#error "This package requires Visual Studio 2015 Update 2 or higher."
+#if defined(_MSC_VER) && _MSC_VER < 1920 && !defined(__clang__)
+#error "This package requires Visual Studio 2019 (MSVC++ 16.0) or higher."
 #endif
 
-// We support gcc 4.7 and later.
+// We support GCC 7 and later.
 // This minimum will go up.
 #if defined(__GNUC__) && !defined(__clang__)
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
-#error "This package requires gcc 4.7 or higher."
+#if __GNUC__ < 7
+#error "This package requires GCC 7 or higher."
 #endif
 #endif
 
@@ -69,13 +69,15 @@
 // C++ Version Check
 // -----------------------------------------------------------------------------
 
-// Enforce C++11 as the minimum.  Note that Visual Studio has not
-// advanced __cplusplus despite being good enough for our purposes, so
-// so we exempt it from the check.
-#if defined(__cplusplus) && !defined(_MSC_VER)
-#if __cplusplus < 201103L
-#error "C++ versions less than C++11 are not supported."
-#endif
+// Enforce C++14 as the minimum.
+#if defined(_MSVC_LANG)
+#if _MSVC_LANG < 201402L
+#error "C++ versions less than C++14 are not supported."
+#endif  // _MSVC_LANG < 201402L
+#elif defined(__cplusplus)
+#if __cplusplus < 201402L
+#error "C++ versions less than C++14 are not supported."
+#endif  // __cplusplus < 201402L
 #endif
 
 // -----------------------------------------------------------------------------
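
The split between `_MSVC_LANG` and `__cplusplus` above reflects how MSVC reports the selected standard. A small sketch of the same two-macro pattern for a project-local check; `MY_PROJECT_CPLUSPLUS` is purely illustrative.

    #include <iostream>

    // MSVC reports the chosen standard in _MSVC_LANG rather than updating
    // __cplusplus (unless /Zc:__cplusplus is passed), so both are consulted.
    #if defined(_MSVC_LANG)
    #define MY_PROJECT_CPLUSPLUS _MSVC_LANG
    #else
    #define MY_PROJECT_CPLUSPLUS __cplusplus
    #endif

    #if MY_PROJECT_CPLUSPLUS < 201402L
    #error "This project requires C++14 or newer."
    #endif

    int main() { std::cout << MY_PROJECT_CPLUSPLUS << "\n"; }
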
diff --git a/abseil-cpp/absl/base/port.h b/abseil-cpp/absl/base/port.h
index 6c28068..5bc4d6c 100644
--- a/abseil-cpp/absl/base/port.h
+++ b/abseil-cpp/absl/base/port.h
@@ -14,7 +14,6 @@
 //
 // This file is a forwarding header for other headers containing various
 // portability macros and functions.
-// This file is used for both C and C++!
 
 #ifndef ABSL_BASE_PORT_H_
 #define ABSL_BASE_PORT_H_
diff --git a/abseil-cpp/absl/base/prefetch.h b/abseil-cpp/absl/base/prefetch.h
new file mode 100644
index 0000000..de7a180
--- /dev/null
+++ b/abseil-cpp/absl/base/prefetch.h
@@ -0,0 +1,198 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: prefetch.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines prefetch functions to prefetch memory contents
+// into the first level cache (L1) for the current CPU. The prefetch logic
+// offered in this header is limited to prefetching first level cachelines
+// only, and is aimed at relatively 'simple' prefetching logic.
+//
+#ifndef ABSL_BASE_PREFETCH_H_
+#define ABSL_BASE_PREFETCH_H_
+
+#include "absl/base/config.h"
+
+#if defined(ABSL_INTERNAL_HAVE_SSE)
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900 && \
+    (defined(_M_X64) || defined(_M_IX86))
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Moves data into the L1 cache before it is read, or "prefetches" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// This method prefetches data with the highest degree of temporal locality;
+// data is prefetched where possible into all levels of the cache.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+//  // Computes incremental checksum for `data`.
+//  int ComputeChecksum(int sum, absl::string_view data);
+//
+//  // Computes cumulative checksum for all values in `data`
+//  int ComputeChecksum(absl::Span<const std::string> data) {
+//    int sum = 0;
+//    auto it = data.begin();
+//    auto pit = data.begin();
+//    auto end = data.end();
+//    for (int dist = 8; dist > 0 && pit != data.end(); --dist, ++pit) {
+//      absl::PrefetchToLocalCache(pit->data());
+//    }
+//    for (; pit != end; ++pit, ++it) {
+//      sum = ComputeChecksum(sum, *it);
+//      absl::PrefetchToLocalCache(pit->data());
+//    }
+//    for (; it != end; ++it) {
+//      sum = ComputeChecksum(sum, *it);
+//    }
+//    return sum;
+//  }
+//
+void PrefetchToLocalCache(const void* addr);
+
+// Moves data into the L1 cache before it is read, or "prefetches" it.
+//
+// This function is identical to `PrefetchToLocalCache()` except that it has
+// non-temporal locality: the fetched data should not be left in any of the
+// cache tiers. This is useful for cases where the data is used only once or
+// only briefly, for example, when invoking a destructor on an object.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+//  template <typename Iterator>
+//  void DestroyPointers(Iterator begin, Iterator end) {
+//    int dist = 8;
+//    auto prefetch_it = begin;
+//    while (prefetch_it != end && dist-- > 0) {
+//      absl::PrefetchToLocalCacheNta(*prefetch_it++);
+//    }
+//    while (prefetch_it != end) {
+//      delete *begin++;
+//      absl::PrefetchToLocalCacheNta(*prefetch_it++);
+//    }
+//    while (begin != end) {
+//      delete *begin++;
+//    }
+//  }
+//
+void PrefetchToLocalCacheNta(const void* addr);
+
+// Moves data into the L1 cache with the intent to modify it.
+//
+// This function is similar to `PrefetchToLocalCache()` except that it
+// prefetches cachelines with an 'intent to modify'. This typically includes
+// invalidating cache entries for this address in all other cache tiers, and an
+// exclusive access intent.
+//
+// Incorrect or gratuitous use of this function can degrade performance. As this
+// function can invalidate cachelines held in the caches of other cores,
+// incorrect usage of this function can have an even greater negative impact
+// than incorrect regular prefetches.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+//  void* Arena::Allocate(size_t size) {
+//    void* ptr = AllocateBlock(size);
+//    absl::PrefetchToLocalCacheForWrite(ptr);
+//    return ptr;
+//  }
+//
+void PrefetchToLocalCacheForWrite(const void* addr);
+
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define ABSL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+inline void PrefetchToLocalCache(const void* addr) {
+  __builtin_prefetch(addr, 0, 3);
+}
+
+inline void PrefetchToLocalCacheNta(const void* addr) {
+  __builtin_prefetch(addr, 0, 0);
+}
+
+inline void PrefetchToLocalCacheForWrite(const void* addr) {
+  // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1)
+  // unless -march=broadwell or newer; this is not generally the default, so we
+  // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel
+  // processors and has been present on AMD processors since the K6-2.
+#if defined(__x86_64__)
+  asm("prefetchw (%0)" : : "r"(addr));
+#else
+  __builtin_prefetch(addr, 1, 3);
+#endif
+}
+
+#elif defined(ABSL_INTERNAL_HAVE_SSE)
+
+#define ABSL_HAVE_PREFETCH 1
+
+inline void PrefetchToLocalCache(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
+}
+
+inline void PrefetchToLocalCacheNta(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
+}
+
+inline void PrefetchToLocalCacheForWrite(const void* addr) {
+#if defined(_MM_HINT_ET0)
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_ET0);
+#elif !defined(_MSC_VER) && defined(__x86_64__)
+  // _MM_HINT_ET0 is not universally supported. As we commented further
+  // up, PREFETCHW is recognized as a no-op on older Intel processors
+  // and has been present on AMD processors since the K6-2. We have this
+  // disabled for MSVC compilers as this miscompiles on older MSVC compilers.
+  asm("prefetchw (%0)" : : "r"(addr));
+#endif
+}
+
+#else
+
+inline void PrefetchToLocalCache(const void* addr) {}
+inline void PrefetchToLocalCacheNta(const void* addr) {}
+inline void PrefetchToLocalCacheForWrite(const void* addr) {}
+
+#endif
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_PREFETCH_H_
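
A common pattern with these functions is prefetching a fixed distance ahead of a linear scan. The sketch below assumes only the header added above; `Sum` and `kPrefetchDistance` are illustrative, and a real distance should come from benchmarks, as the header warns.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    #include "absl/base/prefetch.h"

    // Sums `values` while prefetching a fixed distance ahead of the read.
    int64_t Sum(const std::vector<int64_t>& values) {
      constexpr size_t kPrefetchDistance = 16;  // Placeholder tuning value.
      int64_t sum = 0;
      for (size_t i = 0; i < values.size(); ++i) {
        if (i + kPrefetchDistance < values.size()) {
          absl::PrefetchToLocalCache(&values[i + kPrefetchDistance]);
        }
        sum += values[i];
      }
      return sum;
    }

    int main() {
      std::vector<int64_t> values(1 << 20, 1);
      std::cout << Sum(values) << "\n";
    }
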
diff --git a/abseil-cpp/absl/base/prefetch_test.cc b/abseil-cpp/absl/base/prefetch_test.cc
new file mode 100644
index 0000000..ee21989
--- /dev/null
+++ b/abseil-cpp/absl/base/prefetch_test.cc
@@ -0,0 +1,64 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/prefetch.h"
+
+#include <cstring>
+#include <memory>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+// The tests below exercise the functions only to guarantee that they compile
+// and execute correctly. We make no attempt to verify that any prefetch
+// instructions are generated and executed: we assume the various
+// implementations in terms of __builtin_prefetch() or x86 intrinsics to be
+// correct and well tested.
+
+TEST(PrefetchTest, PrefetchToLocalCache_StackA) {
+  char buf[100] = {};
+  absl::PrefetchToLocalCache(buf);
+  absl::PrefetchToLocalCacheNta(buf);
+  absl::PrefetchToLocalCacheForWrite(buf);
+}
+
+TEST(PrefetchTest, PrefetchToLocalCache_Heap) {
+  auto memory = std::make_unique<char[]>(200 << 10);
+  memset(memory.get(), 0, 200 << 10);
+  absl::PrefetchToLocalCache(memory.get());
+  absl::PrefetchToLocalCacheNta(memory.get());
+  absl::PrefetchToLocalCacheForWrite(memory.get());
+  absl::PrefetchToLocalCache(memory.get() + (50 << 10));
+  absl::PrefetchToLocalCacheNta(memory.get() + (50 << 10));
+  absl::PrefetchToLocalCacheForWrite(memory.get() + (50 << 10));
+  absl::PrefetchToLocalCache(memory.get() + (100 << 10));
+  absl::PrefetchToLocalCacheNta(memory.get() + (100 << 10));
+  absl::PrefetchToLocalCacheForWrite(memory.get() + (100 << 10));
+  absl::PrefetchToLocalCache(memory.get() + (150 << 10));
+  absl::PrefetchToLocalCacheNta(memory.get() + (150 << 10));
+  absl::PrefetchToLocalCacheForWrite(memory.get() + (150 << 10));
+}
+
+TEST(PrefetchTest, PrefetchToLocalCache_Nullptr) {
+  absl::PrefetchToLocalCache(nullptr);
+  absl::PrefetchToLocalCacheNta(nullptr);
+  absl::PrefetchToLocalCacheForWrite(nullptr);
+}
+
+TEST(PrefetchTest, PrefetchToLocalCache_InvalidPtr) {
+  absl::PrefetchToLocalCache(reinterpret_cast<const void*>(0x785326532L));
+  absl::PrefetchToLocalCacheNta(reinterpret_cast<const void*>(0x785326532L));
+  absl::PrefetchToLocalCacheForWrite(reinterpret_cast<const void*>(0x78532L));
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/base/spinlock_test_common.cc b/abseil-cpp/absl/base/spinlock_test_common.cc
index dee266e..52ecf58 100644
--- a/abseil-cpp/absl/base/spinlock_test_common.cc
+++ b/abseil-cpp/absl/base/spinlock_test_common.cc
@@ -34,7 +34,7 @@
 #include "absl/synchronization/blocking_counter.h"
 #include "absl/synchronization/notification.h"
 
-constexpr int32_t kNumThreads = 10;
+constexpr uint32_t kNumThreads = 10;
 constexpr int32_t kIters = 1000;
 
 namespace absl {
@@ -48,14 +48,14 @@
                                    int64_t wait_end_time) {
     return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
   }
-  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
+  static int64_t DecodeWaitCycles(uint32_t lock_value) {
     return SpinLock::DecodeWaitCycles(lock_value);
   }
 };
 
 namespace {
 
-static constexpr int kArrayLength = 10;
+static constexpr size_t kArrayLength = 10;
 static uint32_t values[kArrayLength];
 
 ABSL_CONST_INIT static SpinLock static_cooperative_spinlock(
@@ -79,11 +79,11 @@
   return c;
 }
 
-static void TestFunction(int thread_salt, SpinLock* spinlock) {
+static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) {
   for (int i = 0; i < kIters; i++) {
     SpinLockHolder h(spinlock);
-    for (int j = 0; j < kArrayLength; j++) {
-      const int index = (j + thread_salt) % kArrayLength;
+    for (size_t j = 0; j < kArrayLength; j++) {
+      const size_t index = (j + thread_salt) % kArrayLength;
       values[index] = Hash32(values[index], thread_salt);
       std::this_thread::yield();
     }
@@ -92,7 +92,8 @@
 
 static void ThreadedTest(SpinLock* spinlock) {
   std::vector<std::thread> threads;
-  for (int i = 0; i < kNumThreads; ++i) {
+  threads.reserve(kNumThreads);
+  for (uint32_t i = 0; i < kNumThreads; ++i) {
     threads.push_back(std::thread(TestFunction, i, spinlock));
   }
   for (auto& thread : threads) {
@@ -100,7 +101,7 @@
   }
 
   SpinLockHolder h(spinlock);
-  for (int i = 1; i < kArrayLength; i++) {
+  for (size_t i = 1; i < kArrayLength; i++) {
     EXPECT_EQ(values[0], values[i]);
   }
 }
@@ -132,28 +133,28 @@
   // but the lower kProfileTimestampShift will be dropped.
   const int kMaxCyclesShift =
     32 - kLockwordReservedShift + kProfileTimestampShift;
-  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+  const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
 
   // These bits should be zero after encoding.
   const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
 
   // These bits are dropped when wait cycles are encoded.
-  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+  const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
 
   // Test a bunch of random values
   std::default_random_engine generator;
   // Shift to avoid overflow below.
-  std::uniform_int_distribution<uint64_t> time_distribution(
-      0, std::numeric_limits<uint64_t>::max() >> 4);
-  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
+  std::uniform_int_distribution<int64_t> time_distribution(
+      0, std::numeric_limits<int64_t>::max() >> 3);
+  std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
 
   for (int i = 0; i < 100; i++) {
     int64_t start_time = time_distribution(generator);
     int64_t cycles = cycle_distribution(generator);
     int64_t end_time = start_time + cycles;
     uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
-    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
-    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+    EXPECT_EQ(0u, lock_value & kLockwordReservedMask);
+    int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
     EXPECT_EQ(0, decoded & kProfileTimestampMask);
     EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
   }
@@ -177,21 +178,21 @@
   // Test clamping
   uint32_t max_value =
     SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
-  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
-  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+  int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+  int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
   EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
 
   const int64_t step = (1 << kProfileTimestampShift);
   uint32_t after_max_value =
     SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
-  uint64_t after_max_value_decoded =
+  int64_t after_max_value_decoded =
       SpinLockTest::DecodeWaitCycles(after_max_value);
   EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
 
   uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
       start_time, start_time + kMaxCycles - step);
-  uint64_t before_max_value_decoded =
-    SpinLockTest::DecodeWaitCycles(before_max_value);
+  int64_t before_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(before_max_value);
   EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
 }
 
diff --git a/abseil-cpp/absl/base/thread_annotations.h b/abseil-cpp/absl/base/thread_annotations.h
index e23fff1..bc8a620 100644
--- a/abseil-cpp/absl/base/thread_annotations.h
+++ b/abseil-cpp/absl/base/thread_annotations.h
@@ -154,8 +154,8 @@
 
 // ABSL_LOCKS_EXCLUDED()
 //
-// Documents the locks acquired in the body of the function. These locks
-// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// Documents the locks that cannot be held by callers of this function, as they
+// might be acquired by this function (Abseil's `Mutex` locks are
 // non-reentrant).
 #if ABSL_HAVE_ATTRIBUTE(locks_excluded)
 #define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
@@ -317,7 +317,7 @@
 
 // Takes a reference to a guarded data member, and returns an unguarded
 // reference.
-// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead.
+// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
 template <typename T>
 inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
   return v;
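
The clarified `ABSL_LOCKS_EXCLUDED` wording is easiest to see on a type: the annotation names locks the function may acquire, so callers must not already hold them. A minimal sketch follows; `Counter` is illustrative, and it assumes Abseil's `absl::Mutex` and `absl::MutexLock` from `absl/synchronization/mutex.h`.

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    // Methods acquire `mu_` internally, so they must not be called with `mu_`
    // already held (absl::Mutex is non-reentrant).
    class Counter {
     public:
      void Increment() ABSL_LOCKS_EXCLUDED(mu_) {
        absl::MutexLock lock(&mu_);
        ++value_;
      }

      int value() const ABSL_LOCKS_EXCLUDED(mu_) {
        absl::MutexLock lock(&mu_);
        return value_;
      }

     private:
      mutable absl::Mutex mu_;
      int value_ ABSL_GUARDED_BY(mu_) = 0;
    };
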
diff --git a/abseil-cpp/absl/base/throw_delegate_test.cc b/abseil-cpp/absl/base/throw_delegate_test.cc
index 5ba4ce5..e74362b 100644
--- a/abseil-cpp/absl/base/throw_delegate_test.cc
+++ b/abseil-cpp/absl/base/throw_delegate_test.cc
@@ -78,29 +78,97 @@
 #endif
 }
 
-TEST(ThrowHelper, Test) {
-  // Not using EXPECT_THROW because we want to check the .what() message too.
+TEST(ThrowDelegate, ThrowStdLogicErrorChar) {
   ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
+}
+
+TEST(ThrowDelegate, ThrowStdInvalidArgumentChar) {
   ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
+}
+
+TEST(ThrowDelegate, ThrowStdDomainErrorChar) {
   ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
+}
+
+TEST(ThrowDelegate, ThrowStdLengthErrorChar) {
   ExpectThrowChar<std::length_error>(ThrowStdLengthError);
+}
+
+TEST(ThrowDelegate, ThrowStdOutOfRangeChar) {
   ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
+}
+
+TEST(ThrowDelegate, ThrowStdRuntimeErrorChar) {
   ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
+}
+
+TEST(ThrowDelegate, ThrowStdRangeErrorChar) {
   ExpectThrowChar<std::range_error>(ThrowStdRangeError);
+}
+
+TEST(ThrowDelegate, ThrowStdOverflowErrorChar) {
   ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdUnderflowErrorChar) {
   ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
+}
 
+TEST(ThrowDelegate, ThrowStdLogicErrorString) {
   ExpectThrowString<std::logic_error>(ThrowStdLogicError);
-  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
-  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
-  ExpectThrowString<std::length_error>(ThrowStdLengthError);
-  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
-  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
-  ExpectThrowString<std::range_error>(ThrowStdRangeError);
-  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
-  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+}
 
-  ExpectThrowNoWhat<std::bad_function_call>(ThrowStdBadFunctionCall);
+TEST(ThrowDelegate, ThrowStdInvalidArgumentString) {
+  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
+}
+
+TEST(ThrowDelegate, ThrowStdDomainErrorString) {
+  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
+}
+
+TEST(ThrowDelegate, ThrowStdLengthErrorString) {
+  ExpectThrowString<std::length_error>(ThrowStdLengthError);
+}
+
+TEST(ThrowDelegate, ThrowStdOutOfRangeString) {
+  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
+}
+
+TEST(ThrowDelegate, ThrowStdRuntimeErrorString) {
+  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
+}
+
+TEST(ThrowDelegate, ThrowStdRangeErrorString) {
+  ExpectThrowString<std::range_error>(ThrowStdRangeError);
+}
+
+TEST(ThrowDelegate, ThrowStdOverflowErrorString) {
+  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdUnderflowErrorString) {
+  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdBadFunctionCallNoWhat) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    ThrowStdBadFunctionCall();
+    FAIL() << "Didn't throw";
+  } catch (const std::bad_function_call&) {
+  }
+#ifdef _LIBCPP_VERSION
+  catch (const std::exception&) {
+    // https://reviews.llvm.org/D92397 causes issues with the vtable for
+    // std::bad_function_call when using libc++ as a shared library.
+  }
+#endif
+#else
+  EXPECT_DEATH_IF_SUPPORTED(ThrowStdBadFunctionCall(), "");
+#endif
+}
+
+TEST(ThrowDelegate, ThrowStdBadAllocNoWhat) {
   ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
 }
 
diff --git a/abseil-cpp/absl/cleanup/BUILD.bazel b/abseil-cpp/absl/cleanup/BUILD.bazel
new file mode 100644
index 0000000..2154d9f
--- /dev/null
+++ b/abseil-cpp/absl/cleanup/BUILD.bazel
@@ -0,0 +1,65 @@
+# Copyright 2021 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+cc_library(
+    name = "cleanup_internal",
+    hdrs = ["internal/cleanup.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:base_internal",
+        "//absl/base:core_headers",
+        "//absl/utility",
+    ],
+)
+
+cc_library(
+    name = "cleanup",
+    hdrs = [
+        "cleanup.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":cleanup_internal",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_test(
+    name = "cleanup_test",
+    size = "small",
+    srcs = [
+        "cleanup_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":cleanup",
+        "//absl/base:config",
+        "//absl/utility",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
diff --git a/abseil-cpp/absl/cleanup/CMakeLists.txt b/abseil-cpp/absl/cleanup/CMakeLists.txt
new file mode 100644
index 0000000..f5af40b
--- /dev/null
+++ b/abseil-cpp/absl/cleanup/CMakeLists.txt
@@ -0,0 +1,56 @@
+# Copyright 2021 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cleanup_internal
+  HDRS
+    "internal/cleanup.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base_internal
+    absl::core_headers
+    absl::utility
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    cleanup
+  HDRS
+    "cleanup.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::cleanup_internal
+    absl::config
+    absl::core_headers
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    cleanup_test
+  SRCS
+    "cleanup_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::cleanup
+    absl::config
+    absl::utility
+    GTest::gmock_main
+)
diff --git a/abseil-cpp/absl/cleanup/cleanup.h b/abseil-cpp/absl/cleanup/cleanup.h
new file mode 100644
index 0000000..960ccd0
--- /dev/null
+++ b/abseil-cpp/absl/cleanup/cleanup.h
@@ -0,0 +1,140 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cleanup.h
+// -----------------------------------------------------------------------------
+//
+// `absl::Cleanup` implements the scope guard idiom, invoking the contained
+// callback's `operator()() &&` on scope exit.
+//
+// Example:
+//
+// ```
+//   absl::Status CopyGoodData(const char* source_path, const char* sink_path) {
+//     FILE* source_file = fopen(source_path, "r");
+//     if (source_file == nullptr) {
+//       return absl::NotFoundError("No source file");  // No cleanups execute
+//     }
+//
+//     // C++17 style cleanup using class template argument deduction
+//     absl::Cleanup source_closer = [source_file] { fclose(source_file); };
+//
+//     FILE* sink_file = fopen(sink_path, "w");
+//     if (sink_file == nullptr) {
+//       return absl::NotFoundError("No sink file");  // First cleanup executes
+//     }
+//
+//     // C++11 style cleanup using the factory function
+//     auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); });
+//
+//     Data data;
+//     while (ReadData(source_file, &data)) {
+//       if (!data.IsGood()) {
+//         absl::Status result = absl::FailedPreconditionError("Read bad data");
+//         return result;  // Both cleanups execute
+//       }
+//       SaveData(sink_file, &data);
+//     }
+//
+//     return absl::OkStatus();  // Both cleanups execute
+//   }
+// ```
+//
+// Methods:
+//
+// `std::move(cleanup).Cancel()` will prevent the callback from executing.
+//
+// `std::move(cleanup).Invoke()` will execute the callback early, before
+// destruction, and prevent the callback from executing in the destructor.
+//
+// Usage:
+//
+// `absl::Cleanup` is not an interface type. It is only intended to be used
+// within the body of a function. It is not a value type and instead models a
+// control flow construct. Check out `defer` in Golang for something similar.
+
+#ifndef ABSL_CLEANUP_CLEANUP_H_
+#define ABSL_CLEANUP_CLEANUP_H_
+
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/cleanup/internal/cleanup.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename Arg, typename Callback = void()>
+class ABSL_MUST_USE_RESULT Cleanup final {
+  static_assert(cleanup_internal::WasDeduced<Arg>(),
+                "Explicit template parameters are not supported.");
+
+  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+                "Callbacks that return values are not supported.");
+
+ public:
+  Cleanup(Callback callback) : storage_(std::move(callback)) {}  // NOLINT
+
+  Cleanup(Cleanup&& other) = default;
+
+  void Cancel() && {
+    ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+    storage_.DestroyCallback();
+  }
+
+  void Invoke() && {
+    ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+    storage_.InvokeCallback();
+    storage_.DestroyCallback();
+  }
+
+  ~Cleanup() {
+    if (storage_.IsCallbackEngaged()) {
+      storage_.InvokeCallback();
+      storage_.DestroyCallback();
+    }
+  }
+
+ private:
+  cleanup_internal::Storage<Callback> storage_;
+};
+
+// `absl::Cleanup c = /* callback */;`
+//
+// C++17 type deduction API for creating an instance of `absl::Cleanup`
+#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+template <typename Callback>
+Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
+#endif  // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+
+// `auto c = absl::MakeCleanup(/* callback */);`
+//
+// C++11 type deduction API for creating an instance of `absl::Cleanup`
+template <typename... Args, typename Callback>
+absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
+  static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
+                "Explicit template parameters are not supported.");
+
+  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+                "Callbacks that return values are not supported.");
+
+  return {std::move(callback)};
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CLEANUP_CLEANUP_H_
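
Editorial note: as a quick illustration of the `MakeCleanup()` factory and the `Cancel()` method documented in the header above, here is a minimal hedged sketch; `Journal`, `OpenJournal`, and `Commit` are hypothetical placeholders, not APIs introduced by this patch.

```
// Minimal sketch; Journal/OpenJournal/Commit are illustrative placeholders.
absl::Status SaveWithJournal(Database& db, const Record& record) {
  Journal journal = OpenJournal(db);
  // Roll back by default if we exit early.
  auto rollback = absl::MakeCleanup([&journal] { journal.Rollback(); });

  if (!db.Write(record)) {
    return absl::InternalError("write failed");  // rollback runs here
  }

  // Success path: the rollback callback is no longer wanted.
  std::move(rollback).Cancel();
  return Commit(journal);
}
```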
diff --git a/abseil-cpp/absl/cleanup/cleanup_test.cc b/abseil-cpp/absl/cleanup/cleanup_test.cc
new file mode 100644
index 0000000..46b8858
--- /dev/null
+++ b/abseil-cpp/absl/cleanup/cleanup_test.cc
@@ -0,0 +1,311 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/cleanup/cleanup.h"
+
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/utility/utility.h"
+
+namespace {
+
+using Tag = absl::cleanup_internal::Tag;
+
+template <typename Type1, typename Type2>
+constexpr bool IsSame() {
+  return (std::is_same<Type1, Type2>::value);
+}
+
+struct IdentityFactory {
+  template <typename Callback>
+  static Callback AsCallback(Callback callback) {
+    return Callback(std::move(callback));
+  }
+};
+
+// `FunctorClass` is a type used for testing `absl::Cleanup`. It is intended to
+// represent users that make their own move-only callback types outside of
+// `std::function` and lambda literals.
+class FunctorClass {
+  using Callback = std::function<void()>;
+
+ public:
+  explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {}
+
+  FunctorClass(FunctorClass&& other)
+      : callback_(absl::exchange(other.callback_, Callback())) {}
+
+  FunctorClass(const FunctorClass&) = delete;
+
+  FunctorClass& operator=(const FunctorClass&) = delete;
+
+  FunctorClass& operator=(FunctorClass&&) = delete;
+
+  void operator()() const& = delete;
+
+  void operator()() && {
+    ASSERT_TRUE(callback_);
+    callback_();
+    callback_ = nullptr;
+  }
+
+ private:
+  Callback callback_;
+};
+
+struct FunctorClassFactory {
+  template <typename Callback>
+  static FunctorClass AsCallback(Callback callback) {
+    return FunctorClass(std::move(callback));
+  }
+};
+
+struct StdFunctionFactory {
+  template <typename Callback>
+  static std::function<void()> AsCallback(Callback callback) {
+    return std::function<void()>(std::move(callback));
+  }
+};
+
+using CleanupTestParams =
+    ::testing::Types<IdentityFactory, FunctorClassFactory, StdFunctionFactory>;
+template <typename>
+struct CleanupTest : public ::testing::Test {};
+TYPED_TEST_SUITE(CleanupTest, CleanupTestParams);
+
+bool fn_ptr_called = false;
+void FnPtrFunction() { fn_ptr_called = true; }
+
+TYPED_TEST(CleanupTest, FactoryProducesCorrectType) {
+  {
+    auto callback = TypeParam::AsCallback([] {});
+    auto cleanup = absl::MakeCleanup(std::move(callback));
+
+    static_assert(
+        IsSame<absl::Cleanup<Tag, decltype(callback)>, decltype(cleanup)>(),
+        "");
+  }
+
+  {
+    auto cleanup = absl::MakeCleanup(&FnPtrFunction);
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+
+  {
+    auto cleanup = absl::MakeCleanup(FnPtrFunction);
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+}
+
+#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+TYPED_TEST(CleanupTest, CTADProducesCorrectType) {
+  {
+    auto callback = TypeParam::AsCallback([] {});
+    absl::Cleanup cleanup = std::move(callback);
+
+    static_assert(
+        IsSame<absl::Cleanup<Tag, decltype(callback)>, decltype(cleanup)>(),
+        "");
+  }
+
+  {
+    absl::Cleanup cleanup = &FnPtrFunction;
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+
+  {
+    absl::Cleanup cleanup = FnPtrFunction;
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+}
+
+TYPED_TEST(CleanupTest, FactoryAndCTADProduceSameType) {
+  {
+    auto callback = IdentityFactory::AsCallback([] {});
+    auto factory_cleanup = absl::MakeCleanup(callback);
+    absl::Cleanup deduction_cleanup = callback;
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup =
+        absl::MakeCleanup(FunctorClassFactory::AsCallback([] {}));
+    absl::Cleanup deduction_cleanup = FunctorClassFactory::AsCallback([] {});
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup =
+        absl::MakeCleanup(StdFunctionFactory::AsCallback([] {}));
+    absl::Cleanup deduction_cleanup = StdFunctionFactory::AsCallback([] {});
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup = absl::MakeCleanup(&FnPtrFunction);
+    absl::Cleanup deduction_cleanup = &FnPtrFunction;
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup = absl::MakeCleanup(FnPtrFunction);
+    absl::Cleanup deduction_cleanup = FnPtrFunction;
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+}
+#endif  // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+
+TYPED_TEST(CleanupTest, BasicUsage) {
+  bool called = false;
+
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
+    EXPECT_FALSE(called);  // Constructor shouldn't invoke the callback
+  }
+
+  EXPECT_TRUE(called);  // Destructor should invoke the callback
+}
+
+TYPED_TEST(CleanupTest, BasicUsageWithFunctionPointer) {
+  fn_ptr_called = false;
+
+  {
+    auto cleanup = absl::MakeCleanup(TypeParam::AsCallback(&FnPtrFunction));
+    EXPECT_FALSE(fn_ptr_called);  // Constructor shouldn't invoke the callback
+  }
+
+  EXPECT_TRUE(fn_ptr_called);  // Destructor should invoke the callback
+}
+
+TYPED_TEST(CleanupTest, Cancel) {
+  bool called = false;
+
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
+    EXPECT_FALSE(called);  // Constructor shouldn't invoke the callback
+
+    std::move(cleanup).Cancel();
+    EXPECT_FALSE(called);  // Cancel shouldn't invoke the callback
+  }
+
+  EXPECT_FALSE(called);  // Destructor shouldn't invoke the callback
+}
+
+TYPED_TEST(CleanupTest, Invoke) {
+  bool called = false;
+
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
+    EXPECT_FALSE(called);  // Constructor shouldn't invoke the callback
+
+    std::move(cleanup).Invoke();
+    EXPECT_TRUE(called);  // Invoke should invoke the callback
+
+    called = false;  // Reset tracker before destructor runs
+  }
+
+  EXPECT_FALSE(called);  // Destructor shouldn't invoke the callback
+}
+
+TYPED_TEST(CleanupTest, Move) {
+  bool called = false;
+
+  {
+    auto moved_from_cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
+    EXPECT_FALSE(called);  // Constructor shouldn't invoke the callback
+
+    {
+      auto moved_to_cleanup = std::move(moved_from_cleanup);
+      EXPECT_FALSE(called);  // Move shouldn't invoke the callback
+    }
+
+    EXPECT_TRUE(called);  // Destructor should invoke the callback
+
+    called = false;  // Reset tracker before destructor runs
+  }
+
+  EXPECT_FALSE(called);  // Destructor shouldn't invoke the callback
+}
+
+int DestructionCount = 0;
+
+struct DestructionCounter {
+  void operator()() {}
+
+  ~DestructionCounter() { ++DestructionCount; }
+};
+
+TYPED_TEST(CleanupTest, DestructorDestroys) {
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter()));
+    DestructionCount = 0;
+  }
+
+  EXPECT_EQ(DestructionCount, 1);  // Engaged cleanup destroys
+}
+
+TYPED_TEST(CleanupTest, CancelDestroys) {
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter()));
+    DestructionCount = 0;
+
+    std::move(cleanup).Cancel();
+    EXPECT_EQ(DestructionCount, 1);  // Cancel destroys
+  }
+
+  EXPECT_EQ(DestructionCount, 1);  // Canceled cleanup does not double destroy
+}
+
+TYPED_TEST(CleanupTest, InvokeDestroys) {
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter()));
+    DestructionCount = 0;
+
+    std::move(cleanup).Invoke();
+    EXPECT_EQ(DestructionCount, 1);  // Invoke destroys
+  }
+
+  EXPECT_EQ(DestructionCount, 1);  // Invoked cleanup does not double destroy
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/cleanup/internal/cleanup.h b/abseil-cpp/absl/cleanup/internal/cleanup.h
new file mode 100644
index 0000000..2783fcb
--- /dev/null
+++ b/abseil-cpp/absl/cleanup/internal/cleanup.h
@@ -0,0 +1,100 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace cleanup_internal {
+
+struct Tag {};
+
+template <typename Arg, typename... Args>
+constexpr bool WasDeduced() {
+  return (std::is_same<cleanup_internal::Tag, Arg>::value) &&
+         (sizeof...(Args) == 0);
+}
+
+template <typename Callback>
+constexpr bool ReturnsVoid() {
+  return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
+}
+
+template <typename Callback>
+class Storage {
+ public:
+  Storage() = delete;
+
+  explicit Storage(Callback callback) {
+    // Placement-new into a character buffer is used for eager destruction when
+    // the cleanup is invoked or cancelled. To ensure this optimizes well, the
+    // behavior is implemented locally instead of using an absl::optional.
+    ::new (GetCallbackBuffer()) Callback(std::move(callback));
+    is_callback_engaged_ = true;
+  }
+
+  Storage(Storage&& other) {
+    ABSL_HARDENING_ASSERT(other.IsCallbackEngaged());
+
+    ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback()));
+    is_callback_engaged_ = true;
+
+    other.DestroyCallback();
+  }
+
+  Storage(const Storage& other) = delete;
+
+  Storage& operator=(Storage&& other) = delete;
+
+  Storage& operator=(const Storage& other) = delete;
+
+  void* GetCallbackBuffer() { return static_cast<void*>(+callback_buffer_); }
+
+  Callback& GetCallback() {
+    return *reinterpret_cast<Callback*>(GetCallbackBuffer());
+  }
+
+  bool IsCallbackEngaged() const { return is_callback_engaged_; }
+
+  void DestroyCallback() {
+    is_callback_engaged_ = false;
+    GetCallback().~Callback();
+  }
+
+  void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    std::move(GetCallback())();
+  }
+
+ private:
+  bool is_callback_engaged_;
+  alignas(Callback) char callback_buffer_[sizeof(Callback)];
+};
+
+}  // namespace cleanup_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CLEANUP_INTERNAL_CLEANUP_H_
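
Editorial note: the `Storage` class added above keeps the callback in a raw aligned buffer and manages its lifetime by hand so the callback can be destroyed eagerly on cancel/invoke. The following stripped-down sketch shows the same pattern in isolation; it is illustrative only, not the Abseil implementation.

```
#include <new>      // placement new
#include <utility>  // std::move

// Illustrative sketch: placement-new into an aligned byte buffer, an
// "engaged" flag, and an explicit destructor call for eager destruction.
template <typename Callback>
class ManualStorage {
 public:
  explicit ManualStorage(Callback cb) {
    ::new (buffer_) Callback(std::move(cb));  // construct in-place
    engaged_ = true;
  }
  ~ManualStorage() {
    if (engaged_) Destroy();
  }
  bool engaged() const { return engaged_; }
  void Invoke() { std::move(*Get())(); }  // invoke as an rvalue, like Cleanup
  void Destroy() {
    engaged_ = false;
    Get()->~Callback();  // explicit destructor call ends the lifetime
  }

 private:
  Callback* Get() { return reinterpret_cast<Callback*>(buffer_); }
  bool engaged_ = false;
  alignas(Callback) unsigned char buffer_[sizeof(Callback)];
};
```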
diff --git a/abseil-cpp/absl/compiler_config_setting.bzl b/abseil-cpp/absl/compiler_config_setting.bzl
deleted file mode 100644
index 6696229..0000000
--- a/abseil-cpp/absl/compiler_config_setting.bzl
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Copyright 2018 The Abseil Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Creates config_setting that allows selecting based on 'compiler' value."""
-
-def create_llvm_config(name, visibility):
-    # The "do_not_use_tools_cpp_compiler_present" attribute exists to
-    # distinguish between older versions of Bazel that do not support
-    # "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do.
-    # In the future, the only way to select on the compiler will be through
-    # flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can
-    # be removed.
-    if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"):
-        native.config_setting(
-            name = name,
-            flag_values = {
-                "@bazel_tools//tools/cpp:compiler": "llvm",
-            },
-            visibility = visibility,
-        )
-    else:
-        native.config_setting(
-            name = name,
-            values = {"compiler": "llvm"},
-            visibility = visibility,
-        )
diff --git a/abseil-cpp/absl/container/BUILD.bazel b/abseil-cpp/absl/container/BUILD.bazel
index 8e72ad0..f22da59 100644
--- a/abseil-cpp/absl/container/BUILD.bazel
+++ b/abseil-cpp/absl/container/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -97,8 +96,9 @@
     ],
 )
 
-cc_test(
+cc_binary(
     name = "fixed_array_benchmark",
+    testonly = 1,
     srcs = ["fixed_array_benchmark.cc"],
     copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -134,6 +134,7 @@
         "//absl/base:core_headers",
         "//absl/base:throw_delegate",
         "//absl/memory",
+        "//absl/meta:type_traits",
     ],
 )
 
@@ -159,16 +160,17 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:exception_testing",
-        "//absl/base:raw_logging_internal",
         "//absl/hash:hash_testing",
+        "//absl/log:check",
         "//absl/memory",
         "//absl/strings",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
-cc_test(
+cc_binary(
     name = "inlined_vector_benchmark",
+    testonly = 1,
     srcs = ["inlined_vector_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -218,11 +220,6 @@
     ],
 )
 
-NOTEST_TAGS_NONMOBILE = [
-    "no_test_darwin_x86_64",
-    "no_test_loonix",
-]
-
 NOTEST_TAGS_MOBILE = [
     "no_test_android_arm",
     "no_test_android_arm64",
@@ -230,8 +227,6 @@
     "no_test_ios_x86_64",
 ]
 
-NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE
-
 cc_library(
     name = "flat_hash_map",
     hdrs = ["flat_hash_map.h"],
@@ -242,6 +237,7 @@
         ":hash_function_defaults",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -251,7 +247,7 @@
     srcs = ["flat_hash_map_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":flat_hash_map",
         ":hash_generator_testing",
@@ -259,7 +255,7 @@
         ":unordered_map_lookup_test",
         ":unordered_map_members_test",
         ":unordered_map_modifiers_test",
-        "//absl/base:raw_logging_internal",
+        "//absl/log:check",
         "//absl/types:any",
         "@com_google_googletest//:gtest_main",
     ],
@@ -285,7 +281,7 @@
     srcs = ["flat_hash_set_test.cc"],
     copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":flat_hash_set",
         ":hash_generator_testing",
@@ -293,7 +289,7 @@
         ":unordered_set_lookup_test",
         ":unordered_set_members_test",
         ":unordered_set_modifiers_test",
-        "//absl/base:raw_logging_internal",
+        "//absl/log:check",
         "//absl/memory",
         "//absl/strings",
         "@com_google_googletest//:gtest_main",
@@ -308,9 +304,10 @@
     deps = [
         ":container_memory",
         ":hash_function_defaults",
-        ":node_hash_policy",
+        ":node_slot_policy",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -320,7 +317,7 @@
     srcs = ["node_hash_map_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":hash_generator_testing",
         ":node_hash_map",
@@ -340,9 +337,10 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":hash_function_defaults",
-        ":node_hash_policy",
+        ":node_slot_policy",
         ":raw_hash_set",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -352,7 +350,7 @@
     srcs = ["node_hash_set_test.cc"],
     copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":node_hash_set",
         ":unordered_set_constructor_test",
@@ -381,7 +379,7 @@
     srcs = ["internal/container_memory_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":container_memory",
         ":test_instance_tracker",
@@ -395,6 +393,9 @@
     hdrs = ["internal/hash_function_defaults.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//visibility:private",
+    ],
     deps = [
         "//absl/base:config",
         "//absl/hash",
@@ -408,7 +409,7 @@
     srcs = ["internal/hash_function_defaults_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS,
+    tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
     deps = [
         ":hash_function_defaults",
         "//absl/hash",
@@ -463,7 +464,10 @@
     hdrs = ["internal/hash_policy_traits.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = ["//absl/meta:type_traits"],
+    deps = [
+        ":common_policy_traits",
+        "//absl/meta:type_traits",
+    ],
 )
 
 cc_test(
@@ -478,6 +482,26 @@
 )
 
 cc_library(
+    name = "common_policy_traits",
+    hdrs = ["internal/common_policy_traits.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = ["//absl/meta:type_traits"],
+)
+
+cc_test(
+    name = "common_policy_traits_test",
+    srcs = ["internal/common_policy_traits_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":common_policy_traits",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
     name = "hashtable_debug",
     hdrs = ["internal/hashtable_debug.h"],
     copts = ABSL_DEFAULT_COPTS,
@@ -507,13 +531,16 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
-        ":have_sse",
         "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:exponential_biased",
+        "//absl/base:raw_logging_internal",
         "//absl/debugging:stacktrace",
         "//absl/memory",
+        "//absl/profiling:exponential_biased",
+        "//absl/profiling:sample_recorder",
         "//absl/synchronization",
+        "//absl/time",
         "//absl/utility",
     ],
 )
@@ -522,10 +549,14 @@
     name = "hashtablez_sampler_test",
     srcs = ["internal/hashtablez_sampler_test.cc"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":hashtablez_sampler",
-        ":have_sse",
+        "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/profiling:sample_recorder",
         "//absl/synchronization",
         "//absl/synchronization:thread_pool",
         "//absl/time",
@@ -534,21 +565,21 @@
 )
 
 cc_library(
-    name = "node_hash_policy",
-    hdrs = ["internal/node_hash_policy.h"],
+    name = "node_slot_policy",
+    hdrs = ["internal/node_slot_policy.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = ["//absl/base:config"],
 )
 
 cc_test(
-    name = "node_hash_policy_test",
-    srcs = ["internal/node_hash_policy_test.cc"],
+    name = "node_slot_policy_test",
+    srcs = ["internal/node_slot_policy_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":hash_policy_traits",
-        ":node_hash_policy",
+        ":node_slot_policy",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -566,14 +597,6 @@
 )
 
 cc_library(
-    name = "have_sse",
-    hdrs = ["internal/have_sse.h"],
-    copts = ABSL_DEFAULT_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
-)
-
-cc_library(
     name = "common",
     hdrs = ["internal/common.h"],
     copts = ABSL_DEFAULT_COPTS,
@@ -597,14 +620,16 @@
         ":hash_policy_traits",
         ":hashtable_debug_hooks",
         ":hashtablez_sampler",
-        ":have_sse",
-        ":layout",
-        "//absl/base:bits",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
         "//absl/base:endian",
+        "//absl/base:prefetch",
+        "//absl/base:raw_logging_internal",
+        "//absl/hash",
         "//absl/memory",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/utility",
     ],
 )
@@ -614,9 +639,15 @@
     srcs = ["internal/raw_hash_set_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkstatic = 1,
-    tags = NOTEST_TAGS,
+    tags = NOTEST_TAGS_MOBILE + [
+        "no_test_loonix",
+        # TODO(b/237097643): investigate race and remove
+        "noarm_gemu",
+    ],
     deps = [
         ":container_memory",
+        ":flat_hash_map",
+        ":flat_hash_set",
         ":hash_function_defaults",
         ":hash_policy_testing",
         ":hashtable_debug",
@@ -624,12 +655,52 @@
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/base:prefetch",
+        "//absl/log",
         "//absl/strings",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
+cc_binary(
+    name = "raw_hash_set_benchmark",
+    testonly = 1,
+    srcs = ["internal/raw_hash_set_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":hash_function_defaults",
+        ":raw_hash_set",
+        "//absl/base:raw_logging_internal",
+        "//absl/strings:str_format",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
+cc_binary(
+    name = "raw_hash_set_probe_benchmark",
+    testonly = 1,
+    srcs = ["internal/raw_hash_set_probe_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = select({
+        "//conditions:default": [],
+    }) + ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":flat_hash_map",
+        ":hash_function_defaults",
+        ":hashtable_debug",
+        ":raw_hash_set",
+        "//absl/random",
+        "//absl/random:distributions",
+        "//absl/strings",
+        "//absl/strings:str_format",
+    ],
+)
+
 cc_test(
     name = "raw_hash_set_allocator_test",
     size = "small",
@@ -665,18 +736,34 @@
     srcs = ["internal/layout_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS,
+    tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
     visibility = ["//visibility:private"],
     deps = [
         ":layout",
         "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log:check",
         "//absl/types:span",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
+cc_binary(
+    name = "layout_benchmark",
+    testonly = 1,
+    srcs = ["internal/layout_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":layout",
+        "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
 cc_library(
     name = "tracked",
     testonly = 1,
@@ -796,7 +883,7 @@
     srcs = ["internal/unordered_set_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":unordered_set_constructor_test",
         ":unordered_set_lookup_test",
@@ -811,7 +898,7 @@
     srcs = ["internal/unordered_map_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = NOTEST_TAGS_NONMOBILE,
+    tags = ["no_test_loonix"],
     deps = [
         ":unordered_map_constructor_test",
         ":unordered_map_lookup_test",
@@ -821,6 +908,22 @@
     ],
 )
 
+cc_test(
+    name = "sample_element_size_test",
+    srcs = ["sample_element_size_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["no_test_loonix"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":flat_hash_map",
+        ":flat_hash_set",
+        ":node_hash_map",
+        ":node_hash_set",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "btree",
     srcs = [
@@ -836,10 +939,12 @@
     visibility = ["//visibility:public"],
     deps = [
         ":common",
+        ":common_policy_traits",
         ":compressed_tuple",
         ":container_memory",
         ":layout",
         "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
         "//absl/base:throw_delegate",
         "//absl/memory",
         "//absl/meta:type_traits",
@@ -875,20 +980,27 @@
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     shard_count = 10,
+    tags = [
+        "no_test:os:ios",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
     visibility = ["//visibility:private"],
     deps = [
         ":btree",
         ":btree_test_common",
         ":counting_allocator",
         ":test_instance_tracker",
+        "//absl/algorithm:container",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/flags:flag",
         "//absl/hash:hash_testing",
         "//absl/memory",
-        "//absl/meta:type_traits",
+        "//absl/random",
         "//absl/strings",
         "//absl/types:compare",
+        "//absl/types:optional",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -909,10 +1021,12 @@
         ":flat_hash_map",
         ":flat_hash_set",
         ":hashtable_debug",
+        "//absl/algorithm:container",
         "//absl/base:raw_logging_internal",
-        "//absl/flags:flag",
         "//absl/hash",
+        "//absl/log",
         "//absl/memory",
+        "//absl/random",
         "//absl/strings:cord",
         "//absl/strings:str_format",
         "//absl/time",
diff --git a/abseil-cpp/absl/container/CMakeLists.txt b/abseil-cpp/absl/container/CMakeLists.txt
index eb202c4..39d95e0 100644
--- a/abseil-cpp/absl/container/CMakeLists.txt
+++ b/abseil-cpp/absl/container/CMakeLists.txt
@@ -14,15 +14,6 @@
 # limitations under the License.
 #
 
-# This is deprecated and will be removed in the future.  It also doesn't do
-# anything anyways.  Prefer to use the library associated with the API you are
-# using.
-absl_cc_library(
-  NAME
-    container
-  PUBLIC
-)
-
 absl_cc_library(
   NAME
     btree
@@ -37,6 +28,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::container_common
+    absl::common_policy_traits
     absl::compare
     absl::compressed_tuple
     absl::container_memory
@@ -44,12 +36,14 @@
     absl::core_headers
     absl::layout
     absl::memory
+    absl::raw_logging_internal
     absl::strings
     absl::throw_delegate
     absl::type_traits
     absl::utility
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     btree_test_common
@@ -78,6 +72,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::algorithm_container
     absl::btree
     absl::btree_test_common
     absl::compare
@@ -85,13 +80,15 @@
     absl::counting_allocator
     absl::flags
     absl::hash_testing
+    absl::optional
+    absl::random_random
     absl::raw_logging_internal
     absl::strings
     absl::test_instance_tracker
-    absl::type_traits
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     compressed_tuple
@@ -118,7 +115,7 @@
     absl::optional
     absl::test_instance_tracker
     absl::utility
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -153,7 +150,7 @@
     absl::exception_testing
     absl::hash_testing
     absl::memory
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -167,9 +164,10 @@
     absl::fixed_array
     absl::config
     absl::exception_safety_testing
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     inlined_vector_internal
@@ -199,9 +197,11 @@
     absl::inlined_vector_internal
     absl::throw_delegate
     absl::memory
+    absl::type_traits
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     counting_allocator
@@ -221,17 +221,17 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
-    absl::counting_allocator
-    absl::inlined_vector
-    absl::test_instance_tracker
+    absl::check
     absl::config
     absl::core_headers
+    absl::counting_allocator
     absl::exception_testing
     absl::hash_testing
+    absl::inlined_vector
     absl::memory
-    absl::raw_logging_internal
     absl::strings
-    gmock_main
+    absl::test_instance_tracker
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -245,9 +245,10 @@
     absl::inlined_vector
     absl::config
     absl::exception_safety_testing
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     test_instance_tracker
@@ -271,7 +272,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::test_instance_tracker
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -283,6 +284,7 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::raw_hash_map
     absl::algorithm_container
@@ -298,15 +300,15 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::any
+    absl::check
     absl::flat_hash_map
     absl::hash_generator_testing
     absl::unordered_map_constructor_test
     absl::unordered_map_lookup_test
     absl::unordered_map_members_test
     absl::unordered_map_modifiers_test
-    absl::any
-    absl::raw_logging_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -335,16 +337,16 @@
     ${ABSL_TEST_COPTS}
     "-DUNORDERED_SET_CXX17"
   DEPS
+    absl::check
     absl::flat_hash_set
     absl::hash_generator_testing
+    absl::memory
+    absl::strings
     absl::unordered_set_constructor_test
     absl::unordered_set_lookup_test
     absl::unordered_set_members_test
     absl::unordered_set_modifiers_test
-    absl::memory
-    absl::raw_logging_internal
-    absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -356,8 +358,9 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
-    absl::node_hash_policy
+    absl::node_slot_policy
     absl::raw_hash_map
     absl::algorithm_container
     absl::memory
@@ -379,7 +382,7 @@
     absl::unordered_map_lookup_test
     absl::unordered_map_members_test
     absl::unordered_map_modifiers_test
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -390,8 +393,9 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::core_headers
     absl::hash_function_defaults
-    absl::node_hash_policy
+    absl::node_slot_policy
     absl::raw_hash_set
     absl::algorithm_container
     absl::memory
@@ -413,9 +417,10 @@
     absl::unordered_set_lookup_test
     absl::unordered_set_members_test
     absl::unordered_set_modifiers_test
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     container_memory
@@ -442,9 +447,10 @@
     absl::container_memory
     absl::strings
     absl::test_instance_tracker
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hash_function_defaults
@@ -474,9 +480,10 @@
     absl::hash
     absl::random_random
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hash_generator_testing
@@ -494,6 +501,7 @@
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hash_policy_testing
@@ -516,9 +524,10 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::hash_policy_testing
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hash_policy_traits
@@ -527,6 +536,7 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::common_policy_traits
     absl::meta
   PUBLIC
 )
@@ -540,9 +550,35 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::hash_policy_traits
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    common_policy_traits
+  HDRS
+    "internal/common_policy_traits.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::meta
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    common_policy_traits_test
+  SRCS
+    "internal/common_policy_traits_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::common_policy_traits
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hashtablez_sampler
@@ -555,9 +591,12 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::base
+    absl::config
     absl::exponential_biased
-    absl::have_sse
+    absl::raw_logging_internal
+    absl::sample_recorder
     absl::synchronization
+    absl::time
 )
 
 absl_cc_test(
@@ -568,11 +607,12 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::config
     absl::hashtablez_sampler
-    absl::have_sse
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hashtable_debug
@@ -584,6 +624,7 @@
     absl::hashtable_debug_hooks
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     hashtable_debug_hooks
@@ -596,20 +637,12 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
-    have_sse
+    node_slot_policy
   HDRS
-    "internal/have_sse.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-)
-
-absl_cc_library(
-  NAME
-    node_hash_policy
-  HDRS
-    "internal/node_hash_policy.h"
+    "internal/node_slot_policy.h"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
@@ -619,17 +652,18 @@
 
 absl_cc_test(
   NAME
-    node_hash_policy_test
+    node_slot_policy_test
   SRCS
-    "internal/node_hash_policy_test.cc"
+    "internal/node_slot_policy_test.cc"
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
     absl::hash_policy_traits
-    absl::node_hash_policy
-    gmock_main
+    absl::node_slot_policy
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     raw_hash_map
@@ -644,6 +678,7 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     container_common
@@ -655,6 +690,7 @@
     absl::type_traits
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     raw_hash_set
@@ -671,16 +707,18 @@
     absl::container_common
     absl::container_memory
     absl::core_headers
+    absl::dynamic_annotations
     absl::endian
+    absl::hash
     absl::hash_policy_traits
     absl::hashtable_debug_hooks
-    absl::have_sse
-    absl::layout
+    absl::hashtablez_sampler
     absl::memory
     absl::meta
     absl::optional
+    absl::prefetch
+    absl::raw_logging_internal
     absl::utility
-    absl::hashtablez_sampler
   PUBLIC
 )
 
@@ -692,17 +730,20 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::base
+    absl::config
     absl::container_memory
+    absl::core_headers
+    absl::flat_hash_map
+    absl::flat_hash_set
     absl::hash_function_defaults
     absl::hash_policy_testing
     absl::hashtable_debug
+    absl::log
+    absl::prefetch
     absl::raw_hash_set
-    absl::base
-    absl::config
-    absl::core_headers
-    absl::raw_logging_internal
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -716,9 +757,10 @@
     absl::raw_hash_set
     absl::tracked
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     layout
@@ -745,13 +787,14 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::layout
+    absl::check
     absl::config
     absl::core_headers
-    absl::raw_logging_internal
     absl::span
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     tracked
@@ -764,6 +807,7 @@
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_map_constructor_test
@@ -774,10 +818,11 @@
   DEPS
     absl::hash_generator_testing
     absl::hash_policy_testing
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_map_lookup_test
@@ -788,10 +833,11 @@
   DEPS
     absl::hash_generator_testing
     absl::hash_policy_testing
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_map_members_test
@@ -801,10 +847,11 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::type_traits
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_map_modifiers_test
@@ -815,10 +862,11 @@
   DEPS
     absl::hash_generator_testing
     absl::hash_policy_testing
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_set_constructor_test
@@ -829,10 +877,11 @@
   DEPS
     absl::hash_generator_testing
     absl::hash_policy_testing
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_set_lookup_test
@@ -843,10 +892,11 @@
   DEPS
     absl::hash_generator_testing
     absl::hash_policy_testing
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_set_members_test
@@ -856,10 +906,11 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::type_traits
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     unordered_set_modifiers_test
@@ -870,7 +921,7 @@
   DEPS
     absl::hash_generator_testing
     absl::hash_policy_testing
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
@@ -886,7 +937,7 @@
     absl::unordered_set_lookup_test
     absl::unordered_set_members_test
     absl::unordered_set_modifiers_test
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -901,5 +952,20 @@
     absl::unordered_map_lookup_test
     absl::unordered_map_members_test
     absl::unordered_map_modifiers_test
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    sample_element_size_test
+  SRCS
+    "sample_element_size_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::flat_hash_map
+    absl::flat_hash_set
+    absl::node_hash_map
+    absl::node_hash_set
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/container/btree_benchmark.cc b/abseil-cpp/absl/container/btree_benchmark.cc
index 4679867..0d26fd4 100644
--- a/abseil-cpp/absl/container/btree_benchmark.cc
+++ b/abseil-cpp/absl/container/btree_benchmark.cc
@@ -26,6 +26,8 @@
 #include <unordered_set>
 #include <vector>
 
+#include "benchmark/benchmark.h"
+#include "absl/algorithm/container.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/container/btree_map.h"
 #include "absl/container/btree_set.h"
@@ -33,13 +35,13 @@
 #include "absl/container/flat_hash_map.h"
 #include "absl/container/flat_hash_set.h"
 #include "absl/container/internal/hashtable_debug.h"
-#include "absl/flags/flag.h"
 #include "absl/hash/hash.h"
+#include "absl/log/log.h"
 #include "absl/memory/memory.h"
+#include "absl/random/random.h"
 #include "absl/strings/cord.h"
 #include "absl/strings/str_format.h"
 #include "absl/time/time.h"
-#include "benchmark/benchmark.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -101,39 +103,6 @@
   BM_InsertImpl<T>(state, true);
 }
 
-// container::insert sometimes returns a pair<iterator, bool> and sometimes
-// returns an iterator (for multi- containers).
-template <typename Iter>
-Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) {
-  return pair.first;
-}
-template <typename Iter>
-Iter GetIterFromInsert(const Iter iter) {
-  return iter;
-}
-
-// Benchmark insertion of values into a container at the end.
-template <typename T>
-void BM_InsertEnd(benchmark::State& state) {
-  using V = typename remove_pair_const<typename T::value_type>::type;
-  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
-
-  T container;
-  const int kSize = 10000;
-  for (int i = 0; i < kSize; ++i) {
-    container.insert(Generator<V>(kSize)(i));
-  }
-  V v = Generator<V>(kSize)(kSize - 1);
-  typename T::key_type k = key_of_value(v);
-
-  auto it = container.find(k);
-  while (state.KeepRunning()) {
-    // Repeatedly removing then adding v.
-    container.erase(it);
-    it = GetIterFromInsert(container.insert(v));
-  }
-}
-
 // Benchmark inserting the first few elements in a container. In b-tree, this is
 // when the root node grows.
 template <typename T>
@@ -186,9 +155,9 @@
   BM_LookupImpl<T>(state, true);
 }
 
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
 template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -213,9 +182,9 @@
   }
 }
 
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
 template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -255,6 +224,40 @@
   }
 }
 
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different fractions
+// of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+  uint64_t i = 0;
+  template <typename T>
+  bool operator()(const T&) {
+    return ++i % 2;
+  }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+  // Removes half of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 1) / 2;
+  EraseIfPred pred;
+  while (state.KeepRunningBatch(batch_size)) {
+    state.PauseTiming();
+    {
+      T container(values.begin(), values.end());
+      state.ResumeTiming();
+      erase_if(container, pred);
+      benchmark::DoNotOptimize(container);
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
 // Benchmark steady-state insert (into first half of range) and remove (from
 // second half of range), treating the container approximately like a queue with
 // log-time access for all elements. This benchmark does not test the case where
@@ -510,15 +513,14 @@
   void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
   BENCHMARK(BM_##type##_##func)
 
-#define MY_BENCHMARK3(type)               \
+#define MY_BENCHMARK3_STL(type)           \
   MY_BENCHMARK4(type, Insert);            \
   MY_BENCHMARK4(type, InsertSorted);      \
-  MY_BENCHMARK4(type, InsertEnd);         \
   MY_BENCHMARK4(type, InsertSmall);       \
   MY_BENCHMARK4(type, Lookup);            \
   MY_BENCHMARK4(type, FullLookup);        \
-  MY_BENCHMARK4(type, Delete);            \
-  MY_BENCHMARK4(type, DeleteRange);       \
+  MY_BENCHMARK4(type, Erase);             \
+  MY_BENCHMARK4(type, EraseRange);        \
   MY_BENCHMARK4(type, QueueAddRem);       \
   MY_BENCHMARK4(type, MixedAddRem);       \
   MY_BENCHMARK4(type, Fifo);              \
@@ -526,9 +528,13 @@
   MY_BENCHMARK4(type, InsertRangeRandom); \
   MY_BENCHMARK4(type, InsertRangeSorted)
 
+#define MY_BENCHMARK3(type)     \
+  MY_BENCHMARK4(type, EraseIf); \
+  MY_BENCHMARK3_STL(type)
+
 #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
-  MY_BENCHMARK3(stl_##type);                    \
-  MY_BENCHMARK3(stl_unordered_##type);          \
+  MY_BENCHMARK3_STL(stl_##type);                \
+  MY_BENCHMARK3_STL(stl_unordered_##type);      \
   MY_BENCHMARK3(btree_256_##type)
 
 #define MY_BENCHMARK2(type)                \
@@ -718,17 +724,40 @@
       btree_set<BigTypePtr<SIZE>>;                                             \
   using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
       btree_map<int, BigTypePtr<SIZE>>;                                        \
-  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);                        \
-  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);              \
+  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr);                    \
+  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr);          \
   MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
   MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
-  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);                        \
-  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);              \
+  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr);                    \
+  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr);          \
   MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
   MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
 
 BIG_TYPE_PTR_BENCHMARKS(32);
 
+void BM_BtreeSet_IteratorSubtraction(benchmark::State& state) {
+  absl::InsecureBitGen bitgen;
+  std::vector<int> vec;
+  // Randomize the set's insertion order so the nodes aren't all full.
+  vec.reserve(state.range(0));
+  for (int i = 0; i < state.range(0); ++i) vec.push_back(i);
+  absl::c_shuffle(vec, bitgen);
+
+  absl::btree_set<int> set;
+  for (int i : vec) set.insert(i);
+
+  size_t distance = absl::Uniform(bitgen, 0u, set.size());
+  while (state.KeepRunningBatch(distance)) {
+    size_t end = absl::Uniform(bitgen, distance, set.size());
+    size_t begin = end - distance;
+    benchmark::DoNotOptimize(set.find(static_cast<int>(end)) -
+                             set.find(static_cast<int>(begin)));
+    distance = absl::Uniform(bitgen, 0u, set.size());
+  }
+}
+
+BENCHMARK(BM_BtreeSet_IteratorSubtraction)->Range(1 << 10, 1 << 20);
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
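
Editorial note: the new `BM_BtreeSet_IteratorSubtraction` benchmark above exercises direct subtraction of b-tree iterators, which the updated btree_map.h comments describe as faster than `std::distance`. A hedged one-function sketch of that operation, assuming both keys are present in the set:

```
#include "absl/container/btree_set.h"

// Minimal sketch of what the benchmark measures: subtracting two b-tree
// iterators yields the distance between them without an element-by-element
// walk. Assumes lo and hi are both present in the set.
int ElementsBetween(const absl::btree_set<int>& set, int lo, int hi) {
  return static_cast<int>(set.find(hi) - set.find(lo));
}
```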
diff --git a/abseil-cpp/absl/container/btree_map.h b/abseil-cpp/absl/container/btree_map.h
index abc09b0..cd3ee2b 100644
--- a/abseil-cpp/absl/container/btree_map.h
+++ b/abseil-cpp/absl/container/btree_map.h
@@ -35,14 +35,20 @@
 //
 // However, these types should not be considered drop-in replacements for
 // `std::map` and `std::multimap` as there are some API differences, which are
-// noted in this header file.
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
 //
 // Importantly, insertions and deletions may invalidate outstanding iterators,
 // pointers, and references to elements. Such invalidations are typically only
 // an issue if insertion and deletion operations are interleaved with the use of
-// more than one iterator, pointer, or reference simultaneously. For this
-// reason, `insert()` and `erase()` return a valid iterator at the current
-// position.
+// more than one iterator, pointer, or reference simultaneously.  For this
+// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid
+// iterator at the current position. Another important difference is that
+// key-types must be copy-constructible.
+//
+// Another API difference is that btree iterators can be subtracted, and this
+// is faster than using std::distance.
 
 #ifndef ABSL_CONTAINER_BTREE_MAP_H_
 #define ABSL_CONTAINER_BTREE_MAP_H_
@@ -53,6 +59,14 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
+namespace container_internal {
+
+template <typename Key, typename Data, typename Compare, typename Alloc,
+          int TargetNodeSize, bool IsMulti>
+struct map_params;
+
+}  // namespace container_internal
+
 // absl::btree_map<>
 //
 // An `absl::btree_map<K, V>` is an ordered associative container of
@@ -74,7 +88,7 @@
     : public container_internal::btree_map_container<
           container_internal::btree<container_internal::map_params<
               Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
-              /*Multi=*/false>>> {
+              /*IsMulti=*/false>>> {
   using Base = typename btree_map::btree_map_container;
 
  public:
@@ -311,7 +325,8 @@
   // btree_map::extract()
   //
   // Extracts the indicated element, erasing it in the process, and returns it
-  // as a C++17-compatible node handle. Overloads are listed below.
+  // as a C++17-compatible node handle. Any references, pointers, or iterators
+  // are invalidated. Overloads are listed below.
   //
   // node_type extract(const_iterator position):
   //
@@ -336,6 +351,21 @@
   // It does NOT refer to the data layout of the underlying btree.
   using Base::extract;
 
+  // btree_map::extract_and_get_next()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle along with an iterator to the next
+  // element.
+  //
+  // extract_and_get_next_return_type extract_and_get_next(
+  //     const_iterator position):
+  //
+  //   Extracts the element at the indicated position, returns a struct
+  //   containing a member named `node`: a node handle owning that extracted
+  //   data and a member named `next`: an iterator pointing to the next element
+  //   in the btree.
+  using Base::extract_and_get_next;
+
   // btree_map::merge()
   //
   // Extracts elements from a given `source` btree_map into this
@@ -366,8 +396,8 @@
   // Determines whether an element comparing equal to the given `key` exists
   // within the `btree_map`, returning `true` if so or `false` otherwise.
   //
-  // Supports heterogeneous lookup, provided that the map is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
   using Base::contains;
 
   // btree_map::count()
@@ -378,15 +408,14 @@
   // the `btree_map`. Note that this function will return either `1` or `0`
   // since duplicate elements are not allowed within a `btree_map`.
   //
-  // Supports heterogeneous lookup, provided that the map is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
   using Base::count;
 
   // btree_map::equal_range()
   //
-  // Returns a closed range [first, last], defined by a `std::pair` of two
-  // iterators, containing all elements with the passed key in the
-  // `btree_map`.
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the `btree_map`.
   using Base::equal_range;
 
   // btree_map::find()
@@ -396,10 +425,34 @@
   //
   // Finds an element with the passed `key` within the `btree_map`.
   //
-  // Supports heterogeneous lookup, provided that the map is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
   using Base::find;
 
+  // btree_map::lower_bound()
+  //
+  // template <typename K> iterator lower_bound(const K& key):
+  // template <typename K> const_iterator lower_bound(const K& key) const:
+  //
+  // Finds the first element with a key that is not less than `key` within the
+  // `btree_map`.
+  //
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
+  using Base::lower_bound;
+
+  // btree_map::upper_bound()
+  //
+  // template <typename K> iterator upper_bound(const K& key):
+  // template <typename K> const_iterator upper_bound(const K& key) const:
+  //
+  // Finds the first element with a key that is greater than `key` within the
+  // `btree_map`.
+  //
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
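+  //
+  //   For illustration (an editorial sketch, not part of the upstream
+  //   comment), `lower_bound()` and `upper_bound()` bracket the entries whose
+  //   keys compare equal to a given value:
+  //
+  //     absl::btree_map<int, char> m = {{1, 'a'}, {5, 'b'}, {9, 'c'}};
+  //     auto lo = m.lower_bound(5);  // points at {5, 'b'} (first key >= 5)
+  //     auto hi = m.upper_bound(5);  // points at {9, 'c'} (first key > 5)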
+  using Base::upper_bound;
+
   // btree_map::operator[]()
   //
   // Returns a reference to the value mapped to the passed key within the
@@ -444,15 +497,11 @@
 // absl::erase_if(absl::btree_map<>, Pred)
 //
 // Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
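+//
+// For example (an editorial sketch, not part of the upstream comment):
+//
+//   absl::btree_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}};
+//   auto n = absl::erase_if(m, [](const auto &kv) { return kv.first > 1; });
+//   // n == 2; m now holds only {1, 1}.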
 template <typename K, typename V, typename C, typename A, typename Pred>
-void erase_if(btree_map<K, V, C, A> &map, Pred pred) {
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
+typename btree_map<K, V, C, A>::size_type erase_if(
+    btree_map<K, V, C, A> &map, Pred pred) {
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 // absl::btree_multimap
@@ -477,7 +526,7 @@
     : public container_internal::btree_multimap_container<
           container_internal::btree<container_internal::map_params<
               Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
-              /*Multi=*/true>>> {
+              /*IsMulti=*/true>>> {
   using Base = typename btree_multimap::btree_multimap_container;
 
  public:
@@ -668,11 +717,25 @@
   // It does NOT refer to the data layout of the underlying btree.
   using Base::extract;
 
+  // btree_multimap::extract_and_get_next()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle along with an iterator to the next
+  // element.
+  //
+  // extract_and_get_next_return_type extract_and_get_next(
+  //     const_iterator position):
+  //
+  //   Extracts the element at the indicated position and returns a struct
+  //   with two members: `node`, a node handle owning the extracted data, and
+  //   `next`, an iterator pointing to the next element in the btree.
+  using Base::extract_and_get_next;
+
   // btree_multimap::merge()
   //
-  // Extracts elements from a given `source` btree_multimap into this
-  // `btree_multimap`. If the destination `btree_multimap` already contains an
-  // element with an equivalent key, that element is not extracted.
+  // Extracts all elements from a given `source` btree_multimap into this
+  // `btree_multimap`.
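+  //
+  //   A sketch of the behavior (an editorial illustration, not part of the
+  //   upstream comment):
+  //
+  //     absl::btree_multimap<int, char> dst = {{1, 'a'}};
+  //     absl::btree_multimap<int, char> src = {{1, 'b'}, {2, 'c'}};
+  //     dst.merge(src);
+  //     // src is now empty; dst holds {1, 'a'}, {1, 'b'}, and {2, 'c'}.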
   using Base::merge;
 
   // btree_multimap::swap(btree_multimap& other)
@@ -692,8 +755,8 @@
   // Determines whether an element comparing equal to the given `key` exists
   // within the `btree_multimap`, returning `true` if so or `false` otherwise.
   //
-  // Supports heterogeneous lookup, provided that the map is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
   using Base::contains;
 
   // btree_multimap::count()
@@ -703,13 +766,13 @@
   // Returns the number of elements comparing equal to the given `key` within
   // the `btree_multimap`.
   //
-  // Supports heterogeneous lookup, provided that the map is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
   using Base::count;
 
   // btree_multimap::equal_range()
   //
-  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
   // iterators, containing all elements with the passed key in the
   // `btree_multimap`.
   using Base::equal_range;
@@ -721,10 +784,34 @@
   //
   // Finds an element with the passed `key` within the `btree_multimap`.
   //
-  // Supports heterogeneous lookup, provided that the map is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
   using Base::find;
 
+  // btree_multimap::lower_bound()
+  //
+  // template <typename K> iterator lower_bound(const K& key):
+  // template <typename K> const_iterator lower_bound(const K& key) const:
+  //
+  // Finds the first element with a key that is not less than `key` within the
+  // `btree_multimap`.
+  //
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
+  using Base::lower_bound;
+
+  // btree_multimap::upper_bound()
+  //
+  // template <typename K> iterator upper_bound(const K& key):
+  // template <typename K> const_iterator upper_bound(const K& key) const:
+  //
+  // Finds the first element with a key that is greater than `key` within the
+  // `btree_multimap`.
+  //
+  // Supports heterogeneous lookup, provided that the map has a compatible
+  // heterogeneous comparator.
+  using Base::upper_bound;
+
   // btree_multimap::get_allocator()
   //
   // Returns the allocator function associated with this `btree_multimap`.
@@ -752,17 +839,46 @@
 // absl::erase_if(absl::btree_multimap<>, Pred)
 //
 // Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
 template <typename K, typename V, typename C, typename A, typename Pred>
-void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
+typename btree_multimap<K, V, C, A>::size_type erase_if(
+    btree_multimap<K, V, C, A> &map, Pred pred) {
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
+namespace container_internal {
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+          int TargetNodeSize, bool IsMulti>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+                                  /*IsMap=*/true, map_slot_policy<Key, Data>> {
+  using super_type = typename map_params::common_params;
+  using mapped_type = Data;
+  // This type allows us to move keys when it is safe to do so. It is safe
+  // for maps in which value_type and mutable_value_type are layout compatible.
+  using slot_policy = typename super_type::slot_policy;
+  using slot_type = typename super_type::slot_type;
+  using value_type = typename super_type::value_type;
+  using init_type = typename super_type::init_type;
+
+  template <typename V>
+  static auto key(const V &value) -> decltype(value.first) {
+    return value.first;
+  }
+  static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+  static const Key &key(slot_type *s) { return slot_policy::key(s); }
+  // For use in node handle.
+  static auto mutable_key(slot_type *s)
+      -> decltype(slot_policy::mutable_key(s)) {
+    return slot_policy::mutable_key(s);
+  }
+  static mapped_type &value(value_type *value) { return value->second; }
+};
+
+}  // namespace container_internal
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
diff --git a/abseil-cpp/absl/container/btree_set.h b/abseil-cpp/absl/container/btree_set.h
index 21ef0a0..51dc42b 100644
--- a/abseil-cpp/absl/container/btree_set.h
+++ b/abseil-cpp/absl/container/btree_set.h
@@ -35,14 +35,19 @@
 //
 // However, these types should not be considered drop-in replacements for
 // `std::set` and `std::multiset` as there are some API differences, which are
-// noted in this header file.
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
 //
 // Importantly, insertions and deletions may invalidate outstanding iterators,
 // pointers, and references to elements. Such invalidations are typically only
 // an issue if insertion and deletion operations are interleaved with the use of
 // more than one iterator, pointer, or reference simultaneously. For this
-// reason, `insert()` and `erase()` return a valid iterator at the current
-// position.
+// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid
+// iterator at the current position.
+//
+// Another API difference is that btree iterators can be subtracted, and this
+// is faster than using std::distance.
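+//
+// For example (an editorial sketch, not part of the upstream comment):
+//
+//   absl::btree_set<int> s = {10, 20, 30, 40};
+//   auto d = s.find(40) - s.find(10);
+//   // d == 3, equivalent to (but faster than)
+//   // std::distance(s.find(10), s.find(40)).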
 
 #ifndef ABSL_CONTAINER_BTREE_SET_H_
 #define ABSL_CONTAINER_BTREE_SET_H_
@@ -53,6 +58,17 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
+namespace container_internal {
+
+template <typename Key>
+struct set_slot_policy;
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool IsMulti>
+struct set_params;
+
+}  // namespace container_internal
+
 // absl::btree_set<>
 //
 // An `absl::btree_set<K>` is an ordered associative container of unique key
@@ -74,7 +90,7 @@
     : public container_internal::btree_set_container<
           container_internal::btree<container_internal::set_params<
               Key, Compare, Alloc, /*TargetNodeSize=*/256,
-              /*Multi=*/false>>> {
+              /*IsMulti=*/false>>> {
   using Base = typename btree_set::btree_set_container;
 
  public:
@@ -256,7 +272,8 @@
   // btree_set::extract()
   //
   // Extracts the indicated element, erasing it in the process, and returns it
-  // as a C++17-compatible node handle. Overloads are listed below.
+  // as a C++17-compatible node handle. Any references, pointers, or iterators
+  // are invalidated. Overloads are listed below.
   //
   // node_type extract(const_iterator position):
   //
@@ -276,6 +293,21 @@
   // It does NOT refer to the data layout of the underlying btree.
   using Base::extract;
 
+  // btree_set::extract_and_get_next()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle along with an iterator to the next
+  // element.
+  //
+  // extract_and_get_next_return_type extract_and_get_next(
+  //     const_iterator position):
+  //
+  //   Extracts the element at the indicated position and returns a struct
+  //   with two members: `node`, a node handle owning the extracted data, and
+  //   `next`, an iterator pointing to the next element in the btree.
+  using Base::extract_and_get_next;
+
   // btree_set::merge()
   //
   // Extracts elements from a given `source` btree_set into this
@@ -300,8 +332,8 @@
   // Determines whether an element comparing equal to the given `key` exists
   // within the `btree_set`, returning `true` if so or `false` otherwise.
   //
-  // Supports heterogeneous lookup, provided that the set is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
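+  //
+  //   For example (an editorial sketch, not part of the upstream comment), a
+  //   transparent comparator such as `std::less<>` allows lookup with a
+  //   `const char*` key without constructing a temporary `std::string`:
+  //
+  //     absl::btree_set<std::string, std::less<>> s = {"a", "b"};
+  //     bool found = s.contains("a");  // true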
   using Base::contains;
 
   // btree_set::count()
@@ -312,8 +344,8 @@
   // the `btree_set`. Note that this function will return either `1` or `0`
   // since duplicate elements are not allowed within a `btree_set`.
   //
-  // Supports heterogeneous lookup, provided that the set is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
   using Base::count;
 
   // btree_set::equal_range()
@@ -330,10 +362,32 @@
   //
   // Finds an element with the passed `key` within the `btree_set`.
   //
-  // Supports heterogeneous lookup, provided that the set is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
   using Base::find;
 
+  // btree_set::lower_bound()
+  //
+  // template <typename K> iterator lower_bound(const K& key):
+  // template <typename K> const_iterator lower_bound(const K& key) const:
+  //
+  // Finds the first element that is not less than `key` within the `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::lower_bound;
+
+  // btree_set::upper_bound()
+  //
+  // template <typename K> iterator upper_bound(const K& key):
+  // template <typename K> const_iterator upper_bound(const K& key) const:
+  //
+  // Finds the first element that is greater than `key` within the `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::upper_bound;
+
   // btree_set::get_allocator()
   //
   // Returns the allocator function associated with this `btree_set`.
@@ -363,15 +417,11 @@
 // absl::erase_if(absl::btree_set<>, Pred)
 //
 // Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
 template <typename K, typename C, typename A, typename Pred>
-void erase_if(btree_set<K, C, A> &set, Pred pred) {
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
+typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
+                                                Pred pred) {
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 // absl::btree_multiset<>
@@ -396,7 +446,7 @@
     : public container_internal::btree_multiset_container<
           container_internal::btree<container_internal::set_params<
               Key, Compare, Alloc, /*TargetNodeSize=*/256,
-              /*Multi=*/true>>> {
+              /*IsMulti=*/true>>> {
   using Base = typename btree_multiset::btree_multiset_container;
 
  public:
@@ -580,11 +630,25 @@
   // It does NOT refer to the data layout of the underlying btree.
   using Base::extract;
 
+  // btree_multiset::extract_and_get_next()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle along with an iterator to the next
+  // element.
+  //
+  // extract_and_get_next_return_type extract_and_get_next(
+  //     const_iterator position):
+  //
+  //   Extracts the element at the indicated position and returns a struct
+  //   with two members: `node`, a node handle owning the extracted data, and
+  //   `next`, an iterator pointing to the next element in the btree.
+  using Base::extract_and_get_next;
+
   // btree_multiset::merge()
   //
-  // Extracts elements from a given `source` btree_multiset into this
-  // `btree_multiset`. If the destination `btree_multiset` already contains an
-  // element with an equivalent key, that element is not extracted.
+  // Extracts all elements from a given `source` btree_multiset into this
+  // `btree_multiset`.
   using Base::merge;
 
   // btree_multiset::swap(btree_multiset& other)
@@ -604,8 +668,8 @@
   // Determines whether an element comparing equal to the given `key` exists
   // within the `btree_multiset`, returning `true` if so or `false` otherwise.
   //
-  // Supports heterogeneous lookup, provided that the set is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
   using Base::contains;
 
   // btree_multiset::count()
@@ -615,8 +679,8 @@
   // Returns the number of elements comparing equal to the given `key` within
   // the `btree_multiset`.
   //
-  // Supports heterogeneous lookup, provided that the set is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
   using Base::count;
 
   // btree_multiset::equal_range()
@@ -633,10 +697,34 @@
   //
   // Finds an element with the passed `key` within the `btree_multiset`.
   //
-  // Supports heterogeneous lookup, provided that the set is provided a
-  // compatible heterogeneous comparator.
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
   using Base::find;
 
+  // btree_multiset::lower_bound()
+  //
+  // template <typename K> iterator lower_bound(const K& key):
+  // template <typename K> const_iterator lower_bound(const K& key) const:
+  //
+  // Finds the first element that is not less than `key` within the
+  // `btree_multiset`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::lower_bound;
+
+  // btree_multiset::upper_bound()
+  //
+  // template <typename K> iterator upper_bound(const K& key):
+  // template <typename K> const_iterator upper_bound(const K& key) const:
+  //
+  // Finds the first element that is greater than `key` within the
+  // `btree_multiset`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::upper_bound;
+
   // btree_multiset::get_allocator()
   //
   // Returns the allocator function associated with this `btree_multiset`.
@@ -666,17 +754,67 @@
 // absl::erase_if(absl::btree_multiset<>, Pred)
 //
 // Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
 template <typename K, typename C, typename A, typename Pred>
-void erase_if(btree_multiset<K, C, A> &set, Pred pred) {
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
+typename btree_multiset<K, C, A>::size_type erase_if(
+    btree_multiset<K, C, A> &set, Pred pred) {
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
+namespace container_internal {
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface for btree_(multi)set.
+template <typename Key>
+struct set_slot_policy {
+  using slot_type = Key;
+  using value_type = Key;
+  using mutable_value_type = Key;
+
+  static value_type &element(slot_type *slot) { return *slot; }
+  static const value_type &element(const slot_type *slot) { return *slot; }
+
+  template <typename Alloc, class... Args>
+  static void construct(Alloc *alloc, slot_type *slot, Args &&...args) {
+    absl::allocator_traits<Alloc>::construct(*alloc, slot,
+                                             std::forward<Args>(args)...);
+  }
+
+  template <typename Alloc>
+  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+    absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+  }
+
+  template <typename Alloc>
+  static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) {
+    absl::allocator_traits<Alloc>::construct(*alloc, slot, *other);
+  }
+
+  template <typename Alloc>
+  static void destroy(Alloc *alloc, slot_type *slot) {
+    absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+  }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool IsMulti>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+                                  /*IsMap=*/false, set_slot_policy<Key>> {
+  using value_type = Key;
+  using slot_type = typename set_params::common_params::slot_type;
+
+  template <typename V>
+  static const V &key(const V &value) {
+    return value;
+  }
+  static const Key &key(const slot_type *slot) { return *slot; }
+  static const Key &key(slot_type *slot) { return *slot; }
+};
+
+}  // namespace container_internal
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
diff --git a/abseil-cpp/absl/container/btree_test.cc b/abseil-cpp/absl/container/btree_test.cc
index 1bfa0c2..72f446b 100644
--- a/abseil-cpp/absl/container/btree_test.cc
+++ b/abseil-cpp/absl/container/btree_test.cc
@@ -14,17 +14,25 @@
 
 #include "absl/container/btree_test.h"
 
+#include <algorithm>
+#include <array>
 #include <cstdint>
+#include <functional>
+#include <iostream>
+#include <iterator>
 #include <limits>
 #include <map>
 #include <memory>
+#include <numeric>
 #include <stdexcept>
 #include <string>
 #include <type_traits>
 #include <utility>
+#include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/algorithm/container.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/btree_map.h"
@@ -34,11 +42,12 @@
 #include "absl/flags/flag.h"
 #include "absl/hash/hash_testing.h"
 #include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
+#include "absl/random/random.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/compare.h"
+#include "absl/types/optional.h"
 
 ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests");
 
@@ -55,6 +64,7 @@
 using ::testing::IsEmpty;
 using ::testing::IsNull;
 using ::testing::Pair;
+using ::testing::SizeIs;
 
 template <typename T, typename U>
 void CheckPairEquals(const T &x, const U &y) {
@@ -66,6 +76,16 @@
   CheckPairEquals(x.first, y.first);
   CheckPairEquals(x.second, y.second);
 }
+
+bool IsAssertEnabled() {
+  // Use an assert with side effects to figure out whether assertions are
+  // actually enabled.
+  bool assert_enabled = false;
+  assert([&]() {  // NOLINT
+    assert_enabled = true;
+    return true;
+  }());
+  return assert_enabled;
+}
 }  // namespace
 
 // The base class for a sorted associative container checker. TreeType is the
@@ -594,7 +614,7 @@
   using V = typename remove_pair_const<typename T::value_type>::type;
   const std::vector<V> random_values = GenerateValuesWithSeed<V>(
       absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
-      testing::GTEST_FLAG(random_seed));
+      GTEST_FLAG_GET(random_seed));
 
   unique_checker<T, C> container;
 
@@ -618,7 +638,7 @@
   using V = typename remove_pair_const<typename T::value_type>::type;
   const std::vector<V> random_values = GenerateValuesWithSeed<V>(
       absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
-      testing::GTEST_FLAG(random_seed));
+      GTEST_FLAG_GET(random_seed));
 
   multi_checker<T, C> container;
 
@@ -1182,12 +1202,116 @@
   EXPECT_EQ(1, tmap.size());
 }
 
+}  // namespace
+
+class BtreeNodePeer {
+ public:
+  // Yields the size of a leaf node with a specific number of values.
+  template <typename ValueType>
+  constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
+    return btree_node<
+        set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
+                   /*TargetNodeSize=*/256,  // This parameter isn't used here.
+                   /*Multi=*/false>>::SizeWithNSlots(target_values_per_node);
+  }
+
+  // Yields the number of slots in a (non-root) leaf node for this btree.
+  template <typename Btree>
+  constexpr static size_t GetNumSlotsPerNode() {
+    return btree_node<typename Btree::params_type>::kNodeSlots;
+  }
+
+  template <typename Btree>
+  constexpr static size_t GetMaxFieldType() {
+    return std::numeric_limits<
+        typename btree_node<typename Btree::params_type>::field_type>::max();
+  }
+
+  template <typename Btree>
+  constexpr static bool UsesLinearNodeSearch() {
+    return btree_node<typename Btree::params_type>::use_linear_search::value;
+  }
+
+  template <typename Btree>
+  constexpr static bool FieldTypeEqualsSlotType() {
+    return std::is_same<
+        typename btree_node<typename Btree::params_type>::field_type,
+        typename btree_node<typename Btree::params_type>::slot_type>::value;
+  }
+};
+
+namespace {
+
+class BtreeMapTest : public ::testing::Test {
+ public:
+  struct Key {};
+  struct Cmp {
+    template <typename T>
+    bool operator()(T, T) const {
+      return false;
+    }
+  };
+
+  struct KeyLin {
+    using absl_btree_prefer_linear_node_search = std::true_type;
+  };
+  struct CmpLin : Cmp {
+    using absl_btree_prefer_linear_node_search = std::true_type;
+  };
+
+  struct KeyBin {
+    using absl_btree_prefer_linear_node_search = std::false_type;
+  };
+  struct CmpBin : Cmp {
+    using absl_btree_prefer_linear_node_search = std::false_type;
+  };
+
+  template <typename K, typename C>
+  static bool IsLinear() {
+    return BtreeNodePeer::UsesLinearNodeSearch<absl::btree_map<K, int, C>>();
+  }
+};
+
+TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) {
+  // Test requesting linear search by directly exporting an alias.
+  EXPECT_FALSE((IsLinear<Key, Cmp>()));
+  EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
+  EXPECT_TRUE((IsLinear<Key, CmpLin>()));
+  EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
+}
+
+TEST_F(BtreeMapTest, LinearChoiceTree) {
+  // Cmp has precedence, and is forcing binary
+  EXPECT_FALSE((IsLinear<Key, CmpBin>()));
+  EXPECT_FALSE((IsLinear<KeyLin, CmpBin>()));
+  EXPECT_FALSE((IsLinear<KeyBin, CmpBin>()));
+  EXPECT_FALSE((IsLinear<int, CmpBin>()));
+  EXPECT_FALSE((IsLinear<std::string, CmpBin>()));
+  // Cmp has precedence, and is forcing linear
+  EXPECT_TRUE((IsLinear<Key, CmpLin>()));
+  EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
+  EXPECT_TRUE((IsLinear<KeyBin, CmpLin>()));
+  EXPECT_TRUE((IsLinear<int, CmpLin>()));
+  EXPECT_TRUE((IsLinear<std::string, CmpLin>()));
+  // Cmp has no preference, Key determines linear vs binary.
+  EXPECT_FALSE((IsLinear<Key, Cmp>()));
+  EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
+  EXPECT_FALSE((IsLinear<KeyBin, Cmp>()));
+  // arithmetic key w/ std::less or std::greater: linear
+  EXPECT_TRUE((IsLinear<int, std::less<int>>()));
+  EXPECT_TRUE((IsLinear<double, std::greater<double>>()));
+  // arithmetic key w/ custom compare: binary
+  EXPECT_FALSE((IsLinear<int, Cmp>()));
+  // non-arithmetic key: binary
+  EXPECT_FALSE((IsLinear<std::string, std::less<std::string>>()));
+}
+
 TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
   absl::btree_map<std::string, std::unique_ptr<std::string>> m;
 
   std::unique_ptr<std::string> &v = m["A"];
   EXPECT_TRUE(v == nullptr);
-  v.reset(new std::string("X"));
+  v = absl::make_unique<std::string>("X");
 
   auto iter = m.find("A");
   EXPECT_EQ("X", *iter->second);
@@ -1246,38 +1370,34 @@
   EXPECT_EQ(++it, range.second);
 }
 
-template <typename Compare, typename K>
-void AssertKeyCompareToAdapted() {
-  using Adapted = typename key_compare_to_adapter<Compare>::type;
-  static_assert(!std::is_same<Adapted, Compare>::value,
-                "key_compare_to_adapter should have adapted this comparator.");
+template <typename Compare, typename Key>
+void AssertKeyCompareStringAdapted() {
+  using Adapted = typename key_compare_adapter<Compare, Key>::type;
   static_assert(
-      std::is_same<absl::weak_ordering,
-                   absl::result_of_t<Adapted(const K &, const K &)>>::value,
-      "Adapted comparator should be a key-compare-to comparator.");
+      std::is_same<Adapted, StringBtreeDefaultLess>::value ||
+          std::is_same<Adapted, StringBtreeDefaultGreater>::value,
+      "key_compare_adapter should have string-adapted this comparator.");
 }
-template <typename Compare, typename K>
-void AssertKeyCompareToNotAdapted() {
-  using Unadapted = typename key_compare_to_adapter<Compare>::type;
+template <typename Compare, typename Key>
+void AssertKeyCompareNotStringAdapted() {
+  using Adapted = typename key_compare_adapter<Compare, Key>::type;
   static_assert(
-      std::is_same<Unadapted, Compare>::value,
-      "key_compare_to_adapter shouldn't have adapted this comparator.");
-  static_assert(
-      std::is_same<bool,
-                   absl::result_of_t<Unadapted(const K &, const K &)>>::value,
-      "Un-adapted comparator should return bool.");
+      !std::is_same<Adapted, StringBtreeDefaultLess>::value &&
+          !std::is_same<Adapted, StringBtreeDefaultGreater>::value,
+      "key_compare_adapter shouldn't have string-adapted this comparator.");
 }
 
-TEST(Btree, KeyCompareToAdapter) {
-  AssertKeyCompareToAdapted<std::less<std::string>, std::string>();
-  AssertKeyCompareToAdapted<std::greater<std::string>, std::string>();
-  AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>();
-  AssertKeyCompareToAdapted<std::greater<absl::string_view>,
-                            absl::string_view>();
-  AssertKeyCompareToAdapted<std::less<absl::Cord>, absl::Cord>();
-  AssertKeyCompareToAdapted<std::greater<absl::Cord>, absl::Cord>();
-  AssertKeyCompareToNotAdapted<std::less<int>, int>();
-  AssertKeyCompareToNotAdapted<std::greater<int>, int>();
+TEST(Btree, KeyCompareAdapter) {
+  AssertKeyCompareStringAdapted<std::less<std::string>, std::string>();
+  AssertKeyCompareStringAdapted<std::greater<std::string>, std::string>();
+  AssertKeyCompareStringAdapted<std::less<absl::string_view>,
+                                absl::string_view>();
+  AssertKeyCompareStringAdapted<std::greater<absl::string_view>,
+                                absl::string_view>();
+  AssertKeyCompareStringAdapted<std::less<absl::Cord>, absl::Cord>();
+  AssertKeyCompareStringAdapted<std::greater<absl::Cord>, absl::Cord>();
+  AssertKeyCompareNotStringAdapted<std::less<int>, int>();
+  AssertKeyCompareNotStringAdapted<std::greater<int>, int>();
 }
 
 TEST(Btree, RValueInsert) {
@@ -1327,45 +1447,25 @@
   EXPECT_EQ(tracker.swaps(), 0);
 }
 
-}  // namespace
-
-class BtreeNodePeer {
- public:
-  // Yields the size of a leaf node with a specific number of values.
-  template <typename ValueType>
-  constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
-    return btree_node<
-        set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
-                   /*TargetNodeSize=*/256,  // This parameter isn't used here.
-                   /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
-  }
-
-  // Yields the number of values in a (non-root) leaf node for this set.
-  template <typename Set>
-  constexpr static size_t GetNumValuesPerNode() {
-    return btree_node<typename Set::params_type>::kNodeValues;
-  }
-
-  template <typename Set>
-  constexpr static size_t GetMaxFieldType() {
-    return std::numeric_limits<
-        typename btree_node<typename Set::params_type>::field_type>::max();
-  }
+template <typename Cmp>
+struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase {
+  using Cmp::Cmp;
+  CheckedCompareOptedOutCmp() {}
+  CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {}  // NOLINT
 };
 
-namespace {
-
-// A btree set with a specific number of values per node.
+// A btree set with a specific number of values per node. Opt out of
+// checked_compare so that we can expect exact numbers of comparisons.
 template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
 class SizedBtreeSet
     : public btree_set_container<btree<
-          set_params<Key, Cmp, std::allocator<Key>,
+          set_params<Key, CheckedCompareOptedOutCmp<Cmp>, std::allocator<Key>,
                      BtreeNodePeer::GetTargetNodeSize<Key>(TargetValuesPerNode),
                      /*Multi=*/false>>> {
   using Base = typename SizedBtreeSet::btree_set_container;
 
  public:
-  SizedBtreeSet() {}
+  SizedBtreeSet() = default;
   using Base::Base;
 };
 
@@ -1383,12 +1483,20 @@
   tracker->ResetCopiesMovesSwaps();
 }
 
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+constexpr bool kAsan = true;
+#else
+constexpr bool kAsan = false;
+#endif
+
 // Note: when the values in this test change, it is expected to have an impact
 // on performance.
 TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
+  if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
+
   InstanceTracker tracker;
   // Note: this is minimum number of values per node.
-  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3> set3;
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4> set4;
   // Note: this is the default number of values per node for a set of int32s
   // (with 64-bit pointers).
   SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61;
@@ -1399,28 +1507,29 @@
   std::vector<int> values =
       GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
 
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
   if (sizeof(void *) == 8) {
-    EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
-              BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+    EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+              // When we have generations, there is one fewer slot.
+              BtreeGenerationsEnabled() ? 60 : 61);
   }
 
   // Test key insertion/deletion in random order.
-  ExpectOperationCounts(45281, 132551, values, &tracker, &set3);
+  ExpectOperationCounts(56540, 134212, values, &tracker, &set4);
   ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
   ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
 
   // Test key insertion/deletion in sorted order.
   std::sort(values.begin(), values.end());
-  ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+  ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
   ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
   ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
 
   // Test key insertion/deletion in reverse sorted order.
   std::reverse(values.begin(), values.end());
-  ExpectOperationCounts(49951, 119325, values, &tracker, &set3);
+  ExpectOperationCounts(54949, 127531, values, &tracker, &set4);
   ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
   ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
 }
@@ -1435,11 +1544,13 @@
 // Note: when the values in this test change, it is expected to have an impact
 // on performance.
 TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
+  if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
+
   InstanceTracker tracker;
   // Note: this is minimum number of values per node.
-  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3,
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4,
                 MovableOnlyInstanceThreeWayCompare>
-      set3;
+      set4;
   // Note: this is the default number of values per node for a set of int32s
   // (with 64-bit pointers).
   SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61,
@@ -1454,28 +1565,29 @@
   std::vector<int> values =
       GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
 
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
   if (sizeof(void *) == 8) {
-    EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
-              BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+    EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+              // When we have generations, there is one fewer slot.
+              BtreeGenerationsEnabled() ? 60 : 61);
   }
 
   // Test key insertion/deletion in random order.
-  ExpectOperationCounts(45281, 122560, values, &tracker, &set3);
+  ExpectOperationCounts(56540, 124221, values, &tracker, &set4);
   ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
   ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
 
   // Test key insertion/deletion in sorted order.
   std::sort(values.begin(), values.end());
-  ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+  ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
   ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
   ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
 
   // Test key insertion/deletion in reverse sorted order.
   std::reverse(values.begin(), values.end());
-  ExpectOperationCounts(49951, 109326, values, &tracker, &set3);
+  ExpectOperationCounts(54949, 117532, values, &tracker, &set4);
   ExpectOperationCounts(338813, 108267, values, &tracker, &set61);
   ExpectOperationCounts(534529, 115280, values, &tracker, &set100);
 }
@@ -1561,10 +1673,9 @@
   auto iter = s.emplace(value_to_insert);
   ASSERT_NE(iter, s.end());
   EXPECT_EQ(*iter, value_to_insert);
-  auto iter2 = s.emplace(value_to_insert);
-  EXPECT_NE(iter2, iter);
-  ASSERT_NE(iter2, s.end());
-  EXPECT_EQ(*iter2, value_to_insert);
+  iter = s.emplace(value_to_insert);
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
   auto result = s.equal_range(value_to_insert);
   EXPECT_EQ(std::distance(result.first, result.second), 2);
 }
@@ -1575,44 +1686,45 @@
   auto iter = s.emplace(value_to_insert);
   ASSERT_NE(iter, s.end());
   EXPECT_EQ(*iter, value_to_insert);
-  auto emplace_iter = s.emplace_hint(iter, value_to_insert);
-  EXPECT_NE(emplace_iter, iter);
-  ASSERT_NE(emplace_iter, s.end());
-  EXPECT_EQ(*emplace_iter, value_to_insert);
+  iter = s.emplace_hint(iter, value_to_insert);
+  // The new element should be before the previously inserted one.
+  EXPECT_EQ(iter, s.lower_bound(value_to_insert));
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
 }
 
 TEST(Btree, BtreeMultimapEmplace) {
   const int key_to_insert = 123456;
   const char value0[] = "a";
-  absl::btree_multimap<int, std::string> s;
-  auto iter = s.emplace(key_to_insert, value0);
-  ASSERT_NE(iter, s.end());
+  absl::btree_multimap<int, std::string> m;
+  auto iter = m.emplace(key_to_insert, value0);
+  ASSERT_NE(iter, m.end());
   EXPECT_EQ(iter->first, key_to_insert);
   EXPECT_EQ(iter->second, value0);
   const char value1[] = "b";
-  auto iter2 = s.emplace(key_to_insert, value1);
-  EXPECT_NE(iter2, iter);
-  ASSERT_NE(iter2, s.end());
-  EXPECT_EQ(iter2->first, key_to_insert);
-  EXPECT_EQ(iter2->second, value1);
-  auto result = s.equal_range(key_to_insert);
+  iter = m.emplace(key_to_insert, value1);
+  ASSERT_NE(iter, m.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value1);
+  auto result = m.equal_range(key_to_insert);
   EXPECT_EQ(std::distance(result.first, result.second), 2);
 }
 
 TEST(Btree, BtreeMultimapEmplaceHint) {
   const int key_to_insert = 123456;
   const char value0[] = "a";
-  absl::btree_multimap<int, std::string> s;
-  auto iter = s.emplace(key_to_insert, value0);
-  ASSERT_NE(iter, s.end());
+  absl::btree_multimap<int, std::string> m;
+  auto iter = m.emplace(key_to_insert, value0);
+  ASSERT_NE(iter, m.end());
   EXPECT_EQ(iter->first, key_to_insert);
   EXPECT_EQ(iter->second, value0);
   const char value1[] = "b";
-  auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1);
-  EXPECT_NE(emplace_iter, iter);
-  ASSERT_NE(emplace_iter, s.end());
-  EXPECT_EQ(emplace_iter->first, key_to_insert);
-  EXPECT_EQ(emplace_iter->second, value1);
+  iter = m.emplace_hint(iter, key_to_insert, value1);
+  // The new element should be before the previously inserted one.
+  EXPECT_EQ(iter, m.lower_bound(key_to_insert));
+  ASSERT_NE(iter, m.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value1);
 }
 
 TEST(Btree, ConstIteratorAccessors) {
@@ -1638,10 +1750,25 @@
   EXPECT_EQ(split_set, expected_set);
 }
 
-// We can't use EXPECT_EQ/etc. to compare absl::weak_ordering because they
-// convert literal 0 to int and absl::weak_ordering can only be compared with
-// literal 0. Defining this function allows for avoiding ClangTidy warnings.
-bool Identity(const bool b) { return b; }
+TEST(Btree, KeyComp) {
+  absl::btree_set<int> s;
+  EXPECT_TRUE(s.key_comp()(1, 2));
+  EXPECT_FALSE(s.key_comp()(2, 2));
+  EXPECT_FALSE(s.key_comp()(2, 1));
+
+  absl::btree_map<int, int> m1;
+  EXPECT_TRUE(m1.key_comp()(1, 2));
+  EXPECT_FALSE(m1.key_comp()(2, 2));
+  EXPECT_FALSE(m1.key_comp()(2, 1));
+
+  // Even though we internally adapt the comparator of `m2` to be three-way and
+  // heterogeneous, the comparator we expose through key_comp() is the original
+  // unadapted comparator.
+  absl::btree_map<std::string, int> m2;
+  EXPECT_TRUE(m2.key_comp()("a", "b"));
+  EXPECT_FALSE(m2.key_comp()("b", "b"));
+  EXPECT_FALSE(m2.key_comp()("b", "a"));
+}
 
 TEST(Btree, ValueComp) {
   absl::btree_set<int> s;
@@ -1654,13 +1781,29 @@
   EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0)));
   EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0)));
 
+  // Even though we internally adapt the comparator of `m2` to be three-way and
+  // heterogeneous, the comparator we expose through value_comp() is based on
+  // the original unadapted comparator.
   absl::btree_map<std::string, int> m2;
-  EXPECT_TRUE(Identity(
-      m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0));
-  EXPECT_TRUE(Identity(
-      m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0));
-  EXPECT_TRUE(Identity(
-      m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0));
+  EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)));
+  EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)));
+  EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)));
+}
+
+// Test that we have the protected members from the std::map::value_compare API.
+// See https://en.cppreference.com/w/cpp/container/map/value_compare.
+TEST(Btree, MapValueCompProtected) {
+  struct key_compare {
+    bool operator()(int l, int r) const { return l < r; }
+    int id;
+  };
+  using value_compare = absl::btree_map<int, int, key_compare>::value_compare;
+  struct value_comp_child : public value_compare {
+    explicit value_comp_child(key_compare kc) : value_compare(kc) {}
+    int GetId() const { return comp.id; }
+  };
+  value_comp_child c(key_compare{10});
+  EXPECT_EQ(c.GetId(), 10);
 }
 
 TEST(Btree, DefaultConstruction) {
@@ -1968,6 +2111,103 @@
   EXPECT_EQ(res, ++other.begin());
 }
 
+TEST(Btree, ExtractMultiMapEquivalentKeys) {
+  // Note: using string keys means a three-way comparator.
+  absl::btree_multimap<std::string, int> map;
+  for (int i = 0; i < 100; ++i) {
+    for (int j = 0; j < 100; ++j) {
+      map.insert({absl::StrCat(i), j});
+    }
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    const std::string key = absl::StrCat(i);
+    auto node_handle = map.extract(key);
+    EXPECT_EQ(node_handle.key(), key);
+    EXPECT_EQ(node_handle.mapped(), 0) << i;
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    const std::string key = absl::StrCat(i);
+    auto node_handle = map.extract(key);
+    EXPECT_EQ(node_handle.key(), key);
+    EXPECT_EQ(node_handle.mapped(), 1) << i;
+  }
+}
+
+TEST(Btree, ExtractAndGetNextSet) {
+  absl::btree_set<int> src = {1, 2, 3, 4, 5};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+  EXPECT_EQ(extracted_and_next.node.value(), 3);
+  EXPECT_EQ(*extracted_and_next.next, 4);
+}
+
+TEST(Btree, ExtractAndGetNextMultiSet) {
+  absl::btree_multiset<int> src = {1, 2, 3, 4, 5};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+  EXPECT_EQ(extracted_and_next.node.value(), 3);
+  EXPECT_EQ(*extracted_and_next.next, 4);
+}
+
+TEST(Btree, ExtractAndGetNextMap) {
+  absl::btree_map<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
+  EXPECT_EQ(extracted_and_next.node.key(), 3);
+  EXPECT_EQ(extracted_and_next.node.mapped(), 4);
+  EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
+}
+
+TEST(Btree, ExtractAndGetNextMultiMap) {
+  absl::btree_multimap<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
+  EXPECT_EQ(extracted_and_next.node.key(), 3);
+  EXPECT_EQ(extracted_and_next.node.mapped(), 4);
+  EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
+}
+
+TEST(Btree, ExtractAndGetNextEndIter) {
+  absl::btree_set<int> src = {1, 2, 3, 4, 5};
+  auto it = src.find(5);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(1, 2, 3, 4));
+  EXPECT_EQ(extracted_and_next.node.value(), 5);
+  EXPECT_EQ(extracted_and_next.next, src.end());
+}
+
+TEST(Btree, ExtractDoesntCauseExtraMoves) {
+#ifdef _MSC_VER
+  GTEST_SKIP() << "This test fails on MSVC.";
+#endif
+
+  using Set = absl::btree_set<MovableOnlyInstance>;
+  std::array<std::function<void(Set &)>, 3> extracters = {
+      [](Set &s) { auto node = s.extract(s.begin()); },
+      [](Set &s) { auto ret = s.extract_and_get_next(s.begin()); },
+      [](Set &s) { auto node = s.extract(MovableOnlyInstance(0)); }};
+
+  InstanceTracker tracker;
+  for (int i = 0; i < 3; ++i) {
+    Set s;
+    s.insert(MovableOnlyInstance(0));
+    tracker.ResetCopiesMovesSwaps();
+
+    extracters[i](s);
+    // We expect to see exactly 1 move: from the original slot into the
+    // extracted node.
+    EXPECT_EQ(tracker.copies(), 0) << i;
+    EXPECT_EQ(tracker.moves(), 1) << i;
+    EXPECT_EQ(tracker.swaps(), 0) << i;
+  }
+}
+
 // For multisets, insert with hint also affects correctness because we need to
 // insert immediately before the hint if possible.
 struct InsertMultiHintData {
@@ -2109,6 +2349,31 @@
                                Pair(4, 1), Pair(4, 4), Pair(5, 5)));
 }
 
+TEST(Btree, MergeIntoSetMovableOnly) {
+  absl::btree_set<MovableOnlyInstance> src;
+  src.insert(MovableOnlyInstance(1));
+  absl::btree_multiset<MovableOnlyInstance> dst1;
+  dst1.insert(MovableOnlyInstance(2));
+  absl::btree_set<MovableOnlyInstance> dst2;
+
+  // Test merge into multiset.
+  dst1.merge(src);
+
+  EXPECT_TRUE(src.empty());
+  // ElementsAre/ElementsAreArray don't work with move-only types.
+  ASSERT_THAT(dst1, SizeIs(2));
+  EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1));
+  EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2));
+
+  // Test merge into set.
+  dst2.merge(dst1);
+
+  EXPECT_TRUE(dst1.empty());
+  ASSERT_THAT(dst2, SizeIs(2));
+  EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1));
+  EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2));
+}
+
 struct KeyCompareToWeakOrdering {
   template <typename T>
   absl::weak_ordering operator()(const T &a, const T &b) const {
@@ -2163,7 +2428,9 @@
   };
   using Cmp = decltype(cmp);
 
-  absl::btree_map<int, int, Cmp> m(cmp);
+  // Use a map that is opted out of key_compare being adapted so we can expect
+  // strict comparison call limits.
+  absl::btree_map<int, int, CheckedCompareOptedOutCmp<Cmp>> m(cmp);
   for (int i = 0; i < 128; ++i) {
     m.emplace(i, i);
   }
@@ -2318,23 +2585,28 @@
   // Test that erase_if works with all the container types and supports lambdas.
   {
     absl::btree_set<int> s = {1, 3, 5, 6, 100};
-    erase_if(s, [](int k) { return k > 3; });
+    EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3);
     EXPECT_THAT(s, ElementsAre(1, 3));
   }
   {
     absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
-    erase_if(s, [](int k) { return k <= 3; });
+    EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3);
     EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
   }
   {
     absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
-    erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; });
+    EXPECT_EQ(
+        erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; }),
+        2);
     EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
   }
   {
     absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
                                         {6, 6}, {6, 7}, {100, 6}};
-    erase_if(m, [](std::pair<const int, int> kv) { return kv.second == 6; });
+    EXPECT_EQ(
+        erase_if(m,
+                 [](std::pair<const int, int> kv) { return kv.second == 6; }),
+        3);
     EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
   }
   // Test that erasing all elements from a large set works and test support for
@@ -2342,15 +2614,29 @@
   {
     absl::btree_set<int> s;
     for (int i = 0; i < 1000; ++i) s.insert(2 * i);
-    erase_if(s, IsEven);
+    EXPECT_EQ(erase_if(s, IsEven), 1000);
     EXPECT_THAT(s, IsEmpty());
   }
   // Test that erase_if supports other format of function pointers.
   {
     absl::btree_set<int> s = {1, 3, 5, 6, 100};
-    erase_if(s, &IsEven);
+    EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, ElementsAre(1, 3, 5));
   }
+  // Test that erase_if invokes the predicate once per element.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(i);
+    int pred_calls = 0;
+    EXPECT_EQ(erase_if(s,
+                       [&pred_calls](int k) {
+                         ++pred_calls;
+                         return k % 2;
+                       }),
+              500);
+    EXPECT_THAT(s, SizeIs(500));
+    EXPECT_EQ(pred_calls, 1000);
+  }
 }
 
 TEST(Btree, InsertOrAssign) {
@@ -2585,6 +2871,12 @@
   int i2;
 };
 
+bool operator==(const MultiKey a, const MultiKey b) {
+  return a.i1 == b.i1 && a.i2 == b.i2;
+}
+
+// A heterogeneous comparator that has different equivalence classes for
+// different lookup types.
 struct MultiKeyComp {
   using is_transparent = void;
   bool operator()(const MultiKey a, const MultiKey b) const {
@@ -2595,11 +2887,36 @@
   bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
 };
 
-// Test that when there's a heterogeneous comparator that behaves differently
-// for some heterogeneous operators, we get equal_range() right.
-TEST(Btree, MultiKeyEqualRange) {
-  absl::btree_set<MultiKey, MultiKeyComp> set;
+// A heterogeneous, three-way comparator that has different equivalence classes
+// for different lookup types.
+struct MultiKeyThreeWayComp {
+  using is_transparent = void;
+  absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const {
+    if (a.i1 < b.i1) return absl::weak_ordering::less;
+    if (a.i1 > b.i1) return absl::weak_ordering::greater;
+    if (a.i2 < b.i2) return absl::weak_ordering::less;
+    if (a.i2 > b.i2) return absl::weak_ordering::greater;
+    return absl::weak_ordering::equivalent;
+  }
+  absl::weak_ordering operator()(const int a, const MultiKey b) const {
+    if (a < b.i1) return absl::weak_ordering::less;
+    if (a > b.i1) return absl::weak_ordering::greater;
+    return absl::weak_ordering::equivalent;
+  }
+  absl::weak_ordering operator()(const MultiKey a, const int b) const {
+    if (a.i1 < b) return absl::weak_ordering::less;
+    if (a.i1 > b) return absl::weak_ordering::greater;
+    return absl::weak_ordering::equivalent;
+  }
+};
 
+template <typename Compare>
+class BtreeMultiKeyTest : public ::testing::Test {};
+using MultiKeyComps = ::testing::Types<MultiKeyComp, MultiKeyThreeWayComp>;
+TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps);
+
+TYPED_TEST(BtreeMultiKeyTest, EqualRange) {
+  absl::btree_set<MultiKey, TypeParam> set;
   for (int i = 0; i < 100; ++i) {
     for (int j = 0; j < 100; ++j) {
       set.insert({i, j});
@@ -2609,11 +2926,684 @@
   for (int i = 0; i < 100; ++i) {
     auto equal_range = set.equal_range(i);
     EXPECT_EQ(equal_range.first->i1, i);
-    EXPECT_EQ(equal_range.first->i2, 0);
+    EXPECT_EQ(equal_range.first->i2, 0) << i;
     EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
   }
 }
 
+TYPED_TEST(BtreeMultiKeyTest, Extract) {
+  absl::btree_set<MultiKey, TypeParam> set;
+  for (int i = 0; i < 100; ++i) {
+    for (int j = 0; j < 100; ++j) {
+      set.insert({i, j});
+    }
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    auto node_handle = set.extract(i);
+    EXPECT_EQ(node_handle.value().i1, i);
+    EXPECT_EQ(node_handle.value().i2, 0) << i;
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    auto node_handle = set.extract(i);
+    EXPECT_EQ(node_handle.value().i1, i);
+    EXPECT_EQ(node_handle.value().i2, 1) << i;
+  }
+}
+
+TYPED_TEST(BtreeMultiKeyTest, Erase) {
+  absl::btree_set<MultiKey, TypeParam> set = {
+      {1, 1}, {2, 1}, {2, 2}, {3, 1}};
+  EXPECT_EQ(set.erase(2), 2);
+  EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
+}
+
+TYPED_TEST(BtreeMultiKeyTest, Count) {
+  const absl::btree_set<MultiKey, TypeParam> set = {
+      {1, 1}, {2, 1}, {2, 2}, {3, 1}};
+  EXPECT_EQ(set.count(2), 2);
+}
+
+TEST(Btree, AllocConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  Set set(alloc);
+
+  set.insert({1, 2, 3});
+
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocInitializerListConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  Set set({1, 2, 3}, alloc);
+
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocRangeConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  std::vector<int> v = {1, 2, 3};
+  Set set(v.begin(), v.end(), alloc);
+
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocCopyConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used1 = 0;
+  Alloc alloc1(&bytes_used1);
+  Set set1(alloc1);
+
+  set1.insert({1, 2, 3});
+
+  int64_t bytes_used2 = 0;
+  Alloc alloc2(&bytes_used2);
+  Set set2(set1, alloc2);
+
+  EXPECT_THAT(set1, ElementsAre(1, 2, 3));
+  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
+  EXPECT_EQ(bytes_used1, bytes_used2);
+}
+
+TEST(Btree, AllocMoveConstructor_SameAlloc) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  Set set1(alloc);
+
+  set1.insert({1, 2, 3});
+
+  const int64_t original_bytes_used = bytes_used;
+  EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
+
+  Set set2(std::move(set1), alloc);
+
+  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  EXPECT_EQ(bytes_used, original_bytes_used);
+}
+
+TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used1 = 0;
+  Alloc alloc1(&bytes_used1);
+  Set set1(alloc1);
+
+  set1.insert({1, 2, 3});
+
+  const int64_t original_bytes_used = bytes_used1;
+  EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
+
+  int64_t bytes_used2 = 0;
+  Alloc alloc2(&bytes_used2);
+  Set set2(std::move(set1), alloc2);
+
+  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  // We didn't free these bytes allocated by `set1` yet.
+  EXPECT_EQ(bytes_used1, original_bytes_used);
+  EXPECT_EQ(bytes_used2, original_bytes_used);
+}
+
+bool IntCmp(const int a, const int b) { return a < b; }
+
+TEST(Btree, SupportsFunctionPtrComparator) {
+  absl::btree_set<int, decltype(IntCmp) *> set(IntCmp);
+  set.insert({1, 2, 3});
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_TRUE(set.key_comp()(1, 2));
+  EXPECT_TRUE(set.value_comp()(1, 2));
+
+  absl::btree_map<int, int, decltype(IntCmp) *> map(&IntCmp);
+  map[1] = 1;
+  EXPECT_THAT(map, ElementsAre(Pair(1, 1)));
+  EXPECT_TRUE(map.key_comp()(1, 2));
+  EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2)));
+}
+
+template <typename Compare>
+struct TransparentPassThroughComp {
+  using is_transparent = void;
+
+  // This will fail compilation if we attempt a comparison that Compare does not
+  // support, and the failure will happen inside the function implementation so
+  // it can't be avoided by using SFINAE on this comparator.
+  template <typename T, typename U>
+  bool operator()(const T &lhs, const U &rhs) const {
+    return Compare()(lhs, rhs);
+  }
+};
+
+TEST(Btree,
+     SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) {
+  absl::btree_set<MultiKey, TransparentPassThroughComp<MultiKeyComp>> set;
+  set.insert(MultiKey{1, 2});
+  EXPECT_TRUE(set.contains(1));
+}
+
+TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) {
+  absl::btree_set<MultiKey, MultiKeyComp> set = {{}, MultiKeyComp{}};
+}
+
+TEST(Btree, InvalidComparatorsCaught) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  {
+    struct ZeroAlwaysLessCmp {
+      bool operator()(int lhs, int rhs) const {
+        if (lhs == 0) return true;
+        return lhs < rhs;
+      }
+    };
+    absl::btree_set<int, ZeroAlwaysLessCmp> set;
+    EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
+  }
+  {
+    struct ThreeWayAlwaysLessCmp {
+      absl::weak_ordering operator()(int, int) const {
+        return absl::weak_ordering::less;
+      }
+    };
+    absl::btree_set<int, ThreeWayAlwaysLessCmp> set;
+    EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
+  }
+  {
+    struct SumGreaterZeroCmp {
+      bool operator()(int lhs, int rhs) const {
+        // First, do equivalence correctly so we can test the later condition.
+        if (lhs == rhs) return false;
+        return lhs + rhs > 0;
+      }
+    };
+    absl::btree_set<int, SumGreaterZeroCmp> set;
+    // Note: '!' only needs to be escaped when it's the first character.
+    EXPECT_DEATH(set.insert({0, 1, 2}),
+                 R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex");
+  }
+  {
+    struct ThreeWaySumGreaterZeroCmp {
+      absl::weak_ordering operator()(int lhs, int rhs) const {
+        // First, do equivalence correctly so we can test the later condition.
+        if (lhs == rhs) return absl::weak_ordering::equivalent;
+
+        if (lhs + rhs > 0) return absl::weak_ordering::less;
+        if (lhs + rhs == 0) return absl::weak_ordering::equivalent;
+        return absl::weak_ordering::greater;
+      }
+    };
+    absl::btree_set<int, ThreeWaySumGreaterZeroCmp> set;
+    EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+  }
+  // Verify that we detect cases of comparators that violate transitivity.
+  // When the comparators below check for the presence of an optional field,
+  // they violate transitivity because instances that have the optional field
+  // compare differently with each other from how they compare with instances
+  // that don't have the optional field.
+  struct ClockTime {
+    absl::optional<int> hour;
+    int minute;
+  };
+  // `comp(a,b) && comp(b,c) && !comp(a,c)` violates transitivity.
+  ClockTime a = {absl::nullopt, 1};
+  ClockTime b = {2, 5};
+  ClockTime c = {6, 0};
+  {
+    struct NonTransitiveTimeCmp {
+      bool operator()(ClockTime lhs, ClockTime rhs) const {
+        if (lhs.hour.has_value() && rhs.hour.has_value() &&
+            *lhs.hour != *rhs.hour) {
+          return *lhs.hour < *rhs.hour;
+        }
+        return lhs.minute < rhs.minute;
+      }
+    };
+    NonTransitiveTimeCmp cmp;
+    ASSERT_TRUE(cmp(a, b) && cmp(b, c) && !cmp(a, c));
+    absl::btree_set<ClockTime, NonTransitiveTimeCmp> set;
+    EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
+    absl::btree_multiset<ClockTime, NonTransitiveTimeCmp> mset;
+    EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
+  }
+  {
+    struct ThreeWayNonTransitiveTimeCmp {
+      absl::weak_ordering operator()(ClockTime lhs, ClockTime rhs) const {
+        if (lhs.hour.has_value() && rhs.hour.has_value() &&
+            *lhs.hour != *rhs.hour) {
+          return *lhs.hour < *rhs.hour ? absl::weak_ordering::less
+                                       : absl::weak_ordering::greater;
+        }
+        return lhs.minute < rhs.minute    ? absl::weak_ordering::less
+               : lhs.minute == rhs.minute ? absl::weak_ordering::equivalent
+                                          : absl::weak_ordering::greater;
+      }
+    };
+    ThreeWayNonTransitiveTimeCmp cmp;
+    ASSERT_TRUE(cmp(a, b) < 0 && cmp(b, c) < 0 && cmp(a, c) > 0);
+    absl::btree_set<ClockTime, ThreeWayNonTransitiveTimeCmp> set;
+    EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
+    absl::btree_multiset<ClockTime, ThreeWayNonTransitiveTimeCmp> mset;
+    EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
+  }
+}
+
+TEST(Btree, MutatedKeysCaught) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  struct IntPtrCmp {
+    bool operator()(int *lhs, int *rhs) const { return *lhs < *rhs; }
+  };
+  {
+    absl::btree_set<int *, IntPtrCmp> set;
+    int arr[] = {0, 1, 2};
+    set.insert({&arr[0], &arr[1], &arr[2]});
+    arr[0] = 100;
+    EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
+  }
+  {
+    absl::btree_multiset<int *, IntPtrCmp> set;
+    int arr[] = {0, 1, 2};
+    set.insert({&arr[0], &arr[0], &arr[1], &arr[1], &arr[2], &arr[2]});
+    arr[0] = 100;
+    EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
+  }
+}
+
+#ifndef _MSC_VER
+// This test crashes on MSVC.
+TEST(Btree, InvalidIteratorUse) {
+  if (!BtreeGenerationsEnabled())
+    GTEST_SKIP() << "Generation validation for iterators is disabled.";
+
+  // Invalid memory use can trigger heap-use-after-free in ASan or invalidated
+  // iterator assertions.
+  constexpr const char *kInvalidMemoryDeathMessage =
+      "heap-use-after-free|invalidated iterator";
+
+  {
+    absl::btree_set<int> set;
+    for (int i = 0; i < 10; ++i) set.insert(i);
+    auto it = set.begin();
+    set.erase(it++);
+    EXPECT_DEATH(set.erase(it++), kInvalidMemoryDeathMessage);
+  }
+  {
+    absl::btree_set<int> set;
+    for (int i = 0; i < 10; ++i) set.insert(i);
+    auto it = set.insert(20).first;
+    set.insert(30);
+    EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
+  }
+  {
+    absl::btree_set<int> set;
+    for (int i = 0; i < 10000; ++i) set.insert(i);
+    auto it = set.find(5000);
+    ASSERT_NE(it, set.end());
+    set.erase(1);
+    EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
+  }
+  {
+    absl::btree_set<int> set;
+    for (int i = 0; i < 10; ++i) set.insert(i);
+    auto it = set.insert(20).first;
+    set.insert(30);
+    EXPECT_DEATH(void(it == set.begin()), kInvalidMemoryDeathMessage);
+    EXPECT_DEATH(void(set.begin() == it), kInvalidMemoryDeathMessage);
+  }
+}
+#endif
+
+class OnlyConstructibleByAllocator {
+  explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
+
+ public:
+  OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
+      : i_(other.i_) {}
+  OnlyConstructibleByAllocator &operator=(
+      const OnlyConstructibleByAllocator &other) {
+    i_ = other.i_;
+    return *this;
+  }
+  int Get() const { return i_; }
+  bool operator==(int i) const { return i_ == i; }
+
+ private:
+  template <typename T>
+  friend class OnlyConstructibleAllocator;
+
+  int i_;
+};
+
+template <typename T = OnlyConstructibleByAllocator>
+class OnlyConstructibleAllocator : public std::allocator<T> {
+ public:
+  OnlyConstructibleAllocator() = default;
+  template <class U>
+  explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
+
+  void construct(OnlyConstructibleByAllocator *p, int i) {
+    new (p) OnlyConstructibleByAllocator(i);
+  }
+  template <typename Pair>
+  void construct(Pair *p, const int i) {
+    OnlyConstructibleByAllocator only(i);
+    new (p) Pair(std::move(only), i);
+  }
+
+  template <class U>
+  struct rebind {
+    using other = OnlyConstructibleAllocator<U>;
+  };
+};
+
+struct OnlyConstructibleByAllocatorComp {
+  using is_transparent = void;
+  bool operator()(OnlyConstructibleByAllocator a,
+                  OnlyConstructibleByAllocator b) const {
+    return a.Get() < b.Get();
+  }
+  bool operator()(int a, OnlyConstructibleByAllocator b) const {
+    return a < b.Get();
+  }
+  bool operator()(OnlyConstructibleByAllocator a, int b) const {
+    return a.Get() < b;
+  }
+};
+
+TEST(Btree, OnlyConstructibleByAllocatorType) {
+  const std::array<int, 2> arr = {3, 4};
+  {
+    absl::btree_set<OnlyConstructibleByAllocator,
+                    OnlyConstructibleByAllocatorComp,
+                    OnlyConstructibleAllocator<>>
+        set;
+    set.emplace(1);
+    set.emplace_hint(set.end(), 2);
+    set.insert(arr.begin(), arr.end());
+    EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
+  }
+  {
+    absl::btree_multiset<OnlyConstructibleByAllocator,
+                         OnlyConstructibleByAllocatorComp,
+                         OnlyConstructibleAllocator<>>
+        set;
+    set.emplace(1);
+    set.emplace_hint(set.end(), 2);
+    // TODO(ezb): fix insert_multi to allow this to compile.
+    // set.insert(arr.begin(), arr.end());
+    EXPECT_THAT(set, ElementsAre(1, 2));
+  }
+  {
+    absl::btree_map<OnlyConstructibleByAllocator, int,
+                    OnlyConstructibleByAllocatorComp,
+                    OnlyConstructibleAllocator<>>
+        map;
+    map.emplace(1);
+    map.emplace_hint(map.end(), 2);
+    map.insert(arr.begin(), arr.end());
+    EXPECT_THAT(map,
+                ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+  }
+  {
+    absl::btree_multimap<OnlyConstructibleByAllocator, int,
+                         OnlyConstructibleByAllocatorComp,
+                         OnlyConstructibleAllocator<>>
+        map;
+    map.emplace(1);
+    map.emplace_hint(map.end(), 2);
+    // TODO(ezb): fix insert_multi to allow this to compile.
+    // map.insert(arr.begin(), arr.end());
+    EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2)));
+  }
+}
+
+class NotAssignable {
+ public:
+  explicit NotAssignable(int i) : i_(i) {}
+  NotAssignable(const NotAssignable &other) : i_(other.i_) {}
+  NotAssignable &operator=(NotAssignable &&other) = delete;
+  int Get() const { return i_; }
+  bool operator==(int i) const { return i_ == i; }
+  friend bool operator<(NotAssignable a, NotAssignable b) {
+    return a.i_ < b.i_;
+  }
+
+ private:
+  int i_;
+};
+
+TEST(Btree, NotAssignableType) {
+  {
+    absl::btree_set<NotAssignable> set;
+    set.emplace(1);
+    set.emplace_hint(set.end(), 2);
+    set.insert(NotAssignable(3));
+    set.insert(set.end(), NotAssignable(4));
+    EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
+    set.erase(set.begin());
+    EXPECT_THAT(set, ElementsAre(2, 3, 4));
+  }
+  {
+    absl::btree_multiset<NotAssignable> set;
+    set.emplace(1);
+    set.emplace_hint(set.end(), 2);
+    set.insert(NotAssignable(2));
+    set.insert(set.end(), NotAssignable(3));
+    EXPECT_THAT(set, ElementsAre(1, 2, 2, 3));
+    set.erase(set.begin());
+    EXPECT_THAT(set, ElementsAre(2, 2, 3));
+  }
+  {
+    absl::btree_map<NotAssignable, int> map;
+    map.emplace(NotAssignable(1), 1);
+    map.emplace_hint(map.end(), NotAssignable(2), 2);
+    map.insert({NotAssignable(3), 3});
+    map.insert(map.end(), {NotAssignable(4), 4});
+    EXPECT_THAT(map,
+                ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+    map.erase(map.begin());
+    EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+  }
+  {
+    absl::btree_multimap<NotAssignable, int> map;
+    map.emplace(NotAssignable(1), 1);
+    map.emplace_hint(map.end(), NotAssignable(2), 2);
+    map.insert({NotAssignable(2), 3});
+    map.insert(map.end(), {NotAssignable(3), 3});
+    EXPECT_THAT(map,
+                ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3)));
+    map.erase(map.begin());
+    EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3)));
+  }
+}
+
+struct ArenaLike {
+  void* recycled = nullptr;
+  size_t recycled_size = 0;
+};
+
+// A very simple implementation of arena allocation.
+template <typename T>
+class ArenaLikeAllocator : public std::allocator<T> {
+ public:
+  // Standard library containers require the ability to allocate objects of
+  // different types, which they do via rebind::other.
+  template <typename U>
+  struct rebind {
+    using other = ArenaLikeAllocator<U>;
+  };
+
+  explicit ArenaLikeAllocator(ArenaLike* arena) noexcept : arena_(arena) {}
+
+  ~ArenaLikeAllocator() {
+    if (arena_->recycled != nullptr) {
+      delete [] static_cast<T*>(arena_->recycled);
+      arena_->recycled = nullptr;
+    }
+  }
+
+  template<typename U>
+  explicit ArenaLikeAllocator(const ArenaLikeAllocator<U>& other) noexcept
+      : arena_(other.arena_) {}
+
+  T* allocate(size_t num_objects, const void* = nullptr) {
+    size_t size = num_objects * sizeof(T);
+    if (arena_->recycled != nullptr && arena_->recycled_size == size) {
+      T* result = static_cast<T*>(arena_->recycled);
+      arena_->recycled = nullptr;
+      return result;
+    }
+    return new T[num_objects];
+  }
+
+  void deallocate(T* p, size_t num_objects) {
+    size_t size = num_objects * sizeof(T);
+
+    // Simulate writing to the freed memory as an actual arena allocator might
+    // do. This triggers an error report if the memory is poisoned.
+    memset(p, 0xde, size);
+
+    if (arena_->recycled == nullptr) {
+      arena_->recycled = p;
+      arena_->recycled_size = size;
+    } else {
+      delete [] p;
+    }
+  }
+
+  ArenaLike* arena_;
+};
+
+// This test verifies that an arena allocator that reuses memory will not be
+// asked to free poisoned BTree memory.
+TEST(Btree, ReusePoisonMemory) {
+  using Alloc = ArenaLikeAllocator<int64_t>;
+  using Set = absl::btree_set<int64_t, std::less<int64_t>, Alloc>;
+  ArenaLike arena;
+  Alloc alloc(&arena);
+  Set set(alloc);
+
+  set.insert(0);
+  set.erase(0);
+  set.insert(0);
+}
+
+TEST(Btree, IteratorSubtraction) {
+  absl::BitGen bitgen;
+  std::vector<int> vec;
+  // Randomize the set's insertion order so the nodes aren't all full.
+  for (int i = 0; i < 1000000; ++i) vec.push_back(i);
+  absl::c_shuffle(vec, bitgen);
+
+  absl::btree_set<int> set;
+  for (int i : vec) set.insert(i);
+
+  for (int i = 0; i < 1000; ++i) {
+    size_t begin = absl::Uniform(bitgen, 0u, set.size());
+    size_t end = absl::Uniform(bitgen, begin, set.size());
+    ASSERT_EQ(end - begin, set.find(end) - set.find(begin))
+        << begin << " " << end;
+  }
+}
+
+TEST(Btree, DereferencingEndIterator) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  absl::btree_set<int> set;
+  for (int i = 0; i < 1000; ++i) set.insert(i);
+  EXPECT_DEATH(*set.end(), R"regex(Dereferencing end\(\) iterator)regex");
+}
+
+TEST(Btree, InvalidIteratorComparison) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  absl::btree_set<int> set1, set2;
+  for (int i = 0; i < 1000; ++i) {
+    set1.insert(i);
+    set2.insert(i);
+  }
+
+  constexpr const char *kValueInitDeathMessage =
+      "Comparing default-constructed iterator with .*non-default-constructed "
+      "iterator";
+  typename absl::btree_set<int>::iterator iter1, iter2;
+  EXPECT_EQ(iter1, iter2);
+  EXPECT_DEATH(void(set1.begin() == iter1), kValueInitDeathMessage);
+  EXPECT_DEATH(void(iter1 == set1.begin()), kValueInitDeathMessage);
+
+  constexpr const char *kDifferentContainerDeathMessage =
+      "Comparing iterators from different containers";
+  iter1 = set1.begin();
+  iter2 = set2.begin();
+  EXPECT_DEATH(void(iter1 == iter2), kDifferentContainerDeathMessage);
+  EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage);
+}
+
+TEST(Btree, InvalidPointerUse) {
+  if (!kAsan)
+    GTEST_SKIP() << "We only detect invalid pointer use in ASan mode.";
+
+  absl::btree_set<int> set;
+  set.insert(0);
+  const int *ptr = &*set.begin();
+  set.insert(1);
+  EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+  size_t slots_per_node = BtreeNodePeer::GetNumSlotsPerNode<decltype(set)>();
+  for (int i = 2; i < slots_per_node - 1; ++i) set.insert(i);
+  ptr = &*set.begin();
+  set.insert(static_cast<int>(slots_per_node));
+  EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+}
+
+template<typename Set>
+void TestBasicFunctionality(Set set) {
+  using value_type = typename Set::value_type;
+  for (int i = 0; i < 100; ++i) { set.insert(value_type(i)); }
+  for (int i = 50; i < 100; ++i) { set.erase(value_type(i)); }
+  auto it = set.begin();
+  for (int i = 0; i < 50; ++i, ++it) {
+    ASSERT_EQ(set.find(value_type(i)), it) << i;
+  }
+}
+
+template<size_t align>
+struct alignas(align) OveralignedKey {
+  explicit OveralignedKey(int i) : key(i) {}
+  bool operator<(const OveralignedKey &other) const { return key < other.key; }
+  int key = 0;
+};
+
+TEST(Btree, OveralignedKey) {
+  // Test basic functionality with both even and odd numbers of slots per node.
+  // The goal here is to detect cases where alignment may be incorrect.
+  TestBasicFunctionality(
+      SizedBtreeSet<OveralignedKey<16>, /*TargetValuesPerNode=*/8>());
+  TestBasicFunctionality(
+      SizedBtreeSet<OveralignedKey<16>, /*TargetValuesPerNode=*/9>());
+}
+
+TEST(Btree, FieldTypeEqualsSlotType) {
+  // This breaks if we try to do layout_type::Pointer<slot_type> because
+  // slot_type is the same as field_type.
+  using set_type = absl::btree_set<uint8_t>;
+  static_assert(BtreeNodePeer::FieldTypeEqualsSlotType<set_type>(), "");
+  TestBasicFunctionality(set_type());
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
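
The InvalidComparatorsCaught cases above only exercise deliberately broken
comparators. For contrast, here is a minimal sketch of a well-formed three-way
comparator over a ClockTime-like key; it is illustrative only (the Time and
TimeCmp names are not part of this patch) and assumes the absl::weak_ordering
support already used by those tests.

    #include <tuple>
    #include "absl/container/btree_set.h"
    #include "absl/types/compare.h"
    #include "absl/types/optional.h"

    struct Time {  // hypothetical key, analogous to ClockTime above
      absl::optional<int> hour;
      int minute;
    };

    struct TimeCmp {
      absl::weak_ordering operator()(const Time &lhs, const Time &rhs) const {
        // Compare (has_hour, hour, minute) lexicographically so the ordering is
        // total and transitive, unlike NonTransitiveTimeCmp in the test above.
        const auto l = std::make_tuple(lhs.hour.has_value(),
                                       lhs.hour.value_or(0), lhs.minute);
        const auto r = std::make_tuple(rhs.hour.has_value(),
                                       rhs.hour.value_or(0), rhs.minute);
        if (l < r) return absl::weak_ordering::less;
        if (r < l) return absl::weak_ordering::greater;
        return absl::weak_ordering::equivalent;
      }
    };

    // Usage: absl::btree_set<Time, TimeCmp> times; times.insert({absl::nullopt, 30});
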
diff --git a/abseil-cpp/absl/container/fixed_array.h b/abseil-cpp/absl/container/fixed_array.h
index c8fe8d9..9f1c813 100644
--- a/abseil-cpp/absl/container/fixed_array.h
+++ b/abseil-cpp/absl/container/fixed_array.h
@@ -62,22 +62,16 @@
 // A `FixedArray` provides a run-time fixed-size array, allocating a small array
 // inline for efficiency.
 //
-// Most users should not specify an `inline_elements` argument and let
-// `FixedArray` automatically determine the number of elements
-// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
-// `FixedArray` implementation will use inline storage for arrays with a
-// length <= `inline_elements`.
+// Most users should not specify the `N` template parameter and let `FixedArray`
+// automatically determine the number of elements to store inline based on
+// `sizeof(T)`. If `N` is specified, the `FixedArray` implementation will use
+// inline storage for arrays with a length <= `N`.
 //
 // Note that a `FixedArray` constructed with a `size_type` argument will
 // default-initialize its values by leaving trivially constructible types
 // uninitialized (e.g. int, int[4], double), and others default-constructed.
 // This matches the behavior of c-style arrays and `std::array`, but not
 // `std::vector`.
-//
-// Note that `FixedArray` does not provide a public allocator; if it requires a
-// heap allocation, it will do so with global `::operator new[]()` and
-// `::operator delete[]()`, even if T provides class-scope overrides for these
-// operators.
 template <typename T, size_t N = kFixedArrayUseDefault,
           typename A = std::allocator<T>>
 class FixedArray {
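
For quick orientation on the re-worded `N` documentation above, a minimal usage
sketch follows (illustrative only; the function and variable names are not part
of this patch):

    #include <cstddef>
    #include "absl/container/fixed_array.h"

    void Example(std::size_t n) {
      // Inline capacity chosen automatically from sizeof(int).
      absl::FixedArray<int> automatic(n);
      // Stored inline only while n <= 8; longer arrays are heap-allocated.
      absl::FixedArray<int, 8> pinned(n);
      automatic.fill(0);
      pinned.fill(0);
    }
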
@@ -123,14 +117,20 @@
       (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
                                   : static_cast<size_type>(N));
 
-  FixedArray(
-      const FixedArray& other,
-      const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
+  FixedArray(const FixedArray& other) noexcept(NoexceptCopyable())
+      : FixedArray(other,
+                   AllocatorTraits::select_on_container_copy_construction(
+                       other.storage_.alloc())) {}
+
+  FixedArray(const FixedArray& other,
+             const allocator_type& a) noexcept(NoexceptCopyable())
       : FixedArray(other.begin(), other.end(), a) {}
 
-  FixedArray(
-      FixedArray&& other,
-      const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
+  FixedArray(FixedArray&& other) noexcept(NoexceptMovable())
+      : FixedArray(std::move(other), other.storage_.alloc()) {}
+
+  FixedArray(FixedArray&& other,
+             const allocator_type& a) noexcept(NoexceptMovable())
       : FixedArray(std::make_move_iterator(other.begin()),
                    std::make_move_iterator(other.end()), a) {}
 
@@ -206,18 +206,22 @@
   //
   // Returns a const T* pointer to elements of the `FixedArray`. This pointer
   // can be used to access (but not modify) the contained elements.
-  const_pointer data() const { return AsValueType(storage_.begin()); }
+  const_pointer data() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return AsValueType(storage_.begin());
+  }
 
   // Overload of FixedArray::data() to return a T* pointer to elements of the
   // fixed array. This pointer can be used to access and modify the contained
   // elements.
-  pointer data() { return AsValueType(storage_.begin()); }
+  pointer data() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return AsValueType(storage_.begin());
+  }
 
   // FixedArray::operator[]
   //
   // Returns a reference to the ith element of the fixed array.
   // REQUIRES: 0 <= i < size()
-  reference operator[](size_type i) {
+  reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(i < size());
     return data()[i];
   }
@@ -225,16 +229,16 @@
   // Overload of FixedArray::operator[]() to return a const reference to the
   // ith element of the fixed array.
   // REQUIRES: 0 <= i < size()
-  const_reference operator[](size_type i) const {
+  const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(i < size());
     return data()[i];
   }
 
   // FixedArray::at
   //
-  // Bounds-checked access.  Returns a reference to the ith element of the
-  // fiexed array, or throws std::out_of_range
-  reference at(size_type i) {
+  // Bounds-checked access.  Returns a reference to the ith element of the fixed
+  // array, or throws std::out_of_range
+  reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ABSL_PREDICT_FALSE(i >= size())) {
       base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
     }
@@ -243,7 +247,7 @@
 
   // Overload of FixedArray::at() to return a const reference to the ith element
   // of the fixed array.
-  const_reference at(size_type i) const {
+  const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ABSL_PREDICT_FALSE(i >= size())) {
       base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
     }
@@ -253,14 +257,14 @@
   // FixedArray::front()
   //
   // Returns a reference to the first element of the fixed array.
-  reference front() {
+  reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[0];
   }
 
   // Overload of FixedArray::front() to return a reference to the first element
   // of a fixed array of const values.
-  const_reference front() const {
+  const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[0];
   }
@@ -268,14 +272,14 @@
   // FixedArray::back()
   //
   // Returns a reference to the last element of the fixed array.
-  reference back() {
+  reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[size() - 1];
   }
 
   // Overload of FixedArray::back() to return a reference to the last element
   // of a fixed array of const values.
-  const_reference back() const {
+  const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[size() - 1];
   }
@@ -283,62 +287,74 @@
   // FixedArray::begin()
   //
   // Returns an iterator to the beginning of the fixed array.
-  iterator begin() { return data(); }
+  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
 
   // Overload of FixedArray::begin() to return a const iterator to the
   // beginning of the fixed array.
-  const_iterator begin() const { return data(); }
+  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
 
   // FixedArray::cbegin()
   //
   // Returns a const iterator to the beginning of the fixed array.
-  const_iterator cbegin() const { return begin(); }
+  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return begin();
+  }
 
   // FixedArray::end()
   //
   // Returns an iterator to the end of the fixed array.
-  iterator end() { return data() + size(); }
+  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data() + size(); }
 
   // Overload of FixedArray::end() to return a const iterator to the end of the
   // fixed array.
-  const_iterator end() const { return data() + size(); }
+  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return data() + size();
+  }
 
   // FixedArray::cend()
   //
   // Returns a const iterator to the end of the fixed array.
-  const_iterator cend() const { return end(); }
+  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
 
   // FixedArray::rbegin()
   //
   // Returns a reverse iterator from the end of the fixed array.
-  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return reverse_iterator(end());
+  }
 
   // Overload of FixedArray::rbegin() to return a const reverse iterator from
   // the end of the fixed array.
-  const_reverse_iterator rbegin() const {
+  const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_reverse_iterator(end());
   }
 
   // FixedArray::crbegin()
   //
   // Returns a const reverse iterator from the end of the fixed array.
-  const_reverse_iterator crbegin() const { return rbegin(); }
+  const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return rbegin();
+  }
 
   // FixedArray::rend()
   //
   // Returns a reverse iterator from the beginning of the fixed array.
-  reverse_iterator rend() { return reverse_iterator(begin()); }
+  reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return reverse_iterator(begin());
+  }
 
   // Overload of FixedArray::rend() for returning a const reverse iterator
   // from the beginning of the fixed array.
-  const_reverse_iterator rend() const {
+  const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_reverse_iterator(begin());
   }
 
   // FixedArray::crend()
   //
   // Returns a const reverse iterator from the beginning of the fixed array.
-  const_reverse_iterator crend() const { return rend(); }
+  const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return rend();
+  }
 
   // FixedArray::fill()
   //
@@ -348,7 +364,7 @@
   // Relational operators. Equality operators are elementwise using
   // `operator==`, while order operators order FixedArrays lexicographically.
   friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
-    return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+    return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
   }
 
   friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
@@ -470,12 +486,18 @@
     StorageElement* begin() const { return data_; }
     StorageElement* end() const { return begin() + size(); }
     allocator_type& alloc() { return size_alloc_.template get<1>(); }
+    const allocator_type& alloc() const {
+      return size_alloc_.template get<1>();
+    }
 
    private:
     static bool UsingInlinedStorage(size_type n) {
       return n <= inline_elements;
     }
 
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+    ABSL_ATTRIBUTE_NOINLINE
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
     StorageElement* InitializeData() {
       if (UsingInlinedStorage(size())) {
         InlinedStorage::AnnotateConstruct(size());
@@ -494,12 +516,14 @@
   Storage storage_;
 };
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 template <typename T, size_t N, typename A>
 constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
 
 template <typename T, size_t N, typename A>
 constexpr typename FixedArray<T, N, A>::size_type
     FixedArray<T, N, A>::inline_elements;
+#endif
 
 template <typename T, size_t N, typename A>
 void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
diff --git a/abseil-cpp/absl/container/fixed_array_benchmark.cc b/abseil-cpp/absl/container/fixed_array_benchmark.cc
index 3c7a5a7..db6663e 100644
--- a/abseil-cpp/absl/container/fixed_array_benchmark.cc
+++ b/abseil-cpp/absl/container/fixed_array_benchmark.cc
@@ -16,8 +16,8 @@
 
 #include <string>
 
-#include "benchmark/benchmark.h"
 #include "absl/container/fixed_array.h"
+#include "benchmark/benchmark.h"
 
 namespace {
 
diff --git a/abseil-cpp/absl/container/fixed_array_test.cc b/abseil-cpp/absl/container/fixed_array_test.cc
index 49598e7..9dbf2a8 100644
--- a/abseil-cpp/absl/container/fixed_array_test.cc
+++ b/abseil-cpp/absl/container/fixed_array_test.cc
@@ -768,6 +768,22 @@
   }
 }
 
+TEST(AllocatorSupportTest, PropagatesStatefulAllocator) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = absl::container_internal::CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  auto len = inlined_size * 2;
+  auto val = 0;
+  int64_t allocated = 0;
+  AllocFxdArr arr(len, val, Alloc(&allocated));
+
+  EXPECT_EQ(allocated, len * sizeof(int));
+
+  AllocFxdArr copy = arr;
+  EXPECT_EQ(allocated, len * sizeof(int) * 2);
+}
+
 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
 TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
   absl::FixedArray<int, 32> a(10);
diff --git a/abseil-cpp/absl/container/flat_hash_map.h b/abseil-cpp/absl/container/flat_hash_map.h
index 74def0d..8f4d993 100644
--- a/abseil-cpp/absl/container/flat_hash_map.h
+++ b/abseil-cpp/absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
@@ -75,6 +76,10 @@
 // absl/hash/hash.h for information on extending Abseil hashing to user-defined
 // types.
 //
+// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
+// be randomized across dynamically loaded libraries.
+//
 // NOTE: A `flat_hash_map` stores its value types directly inside its
 // implementation array to avoid memory indirection. Because a `flat_hash_map`
 // is designed to move data when rehashed, map values will not retain pointer
@@ -230,7 +235,11 @@
   // iterator erase(const_iterator first, const_iterator last):
   //
   //   Erases the elements in the range [`first`, `last`), returning an
-  //   iterator pointing to `last`.
+  //   iterator pointing to `last`. The special case of calling
+  //   `erase(begin(), end())` resets the reserved growth such that if
+  //   `reserve(N)` has previously been called and there has been no intervening
+  //   call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+  //   to assume that inserting N elements will not cause a rehash.
   //
   // size_type erase(const key_type& key):
   //
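
A minimal sketch of the reserved-growth guarantee documented above (names are
illustrative and not part of this patch; it relies on the standard
`flat_hash_map` API exercised elsewhere in this change):

    #include "absl/container/flat_hash_map.h"

    void ReuseReservedCapacity() {
      absl::flat_hash_map<int, int> m;
      m.reserve(100);                 // room for 100 insertions without rehash
      for (int i = 0; i < 100; ++i) m[i] = i;
      m.erase(m.begin(), m.end());    // resets reserved growth (no clear() call)
      int *first = &m[0];             // first of up to 100 new insertions
      for (int i = 1; i < 100; ++i) m[i] = i;
      *first = 42;  // still valid: the 100 inserts fit within the reservation
    }
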
@@ -356,8 +365,8 @@
   // `flat_hash_map`.
   //
   //   iterator try_emplace(const_iterator hint,
-  //                        const init_type& k, Args&&... args):
-  //   iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+  //                        const key_type& k, Args&&... args):
+  //   iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
   //
   // Inserts (via copy or move) the element of the specified key into the
   // `flat_hash_map` using the position of `hint` as a non-binding suggestion
@@ -541,10 +550,12 @@
 // erase_if(flat_hash_map<>, Pred)
 //
 // Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
 template <typename K, typename V, typename H, typename E, typename A,
           typename Predicate>
-void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
-  container_internal::EraseIf(pred, &c);
+typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
+    flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+  return container_internal::EraseIf(pred, &c);
 }
 
 namespace container_internal {
diff --git a/abseil-cpp/absl/container/flat_hash_map_test.cc b/abseil-cpp/absl/container/flat_hash_map_test.cc
index 89ec60c..e6acbea 100644
--- a/abseil-cpp/absl/container/flat_hash_map_test.cc
+++ b/abseil-cpp/absl/container/flat_hash_map_test.cc
@@ -16,12 +16,12 @@
 
 #include <memory>
 
-#include "absl/base/internal/raw_logging.h"
 #include "absl/container/internal/hash_generator_testing.h"
 #include "absl/container/internal/unordered_map_constructor_test.h"
 #include "absl/container/internal/unordered_map_lookup_test.h"
 #include "absl/container/internal/unordered_map_members_test.h"
 #include "absl/container/internal/unordered_map_modifiers_test.h"
+#include "absl/log/check.h"
 #include "absl/types/any.h"
 
 namespace absl {
@@ -40,10 +40,10 @@
   BeforeMain() {
     absl::flat_hash_map<int, int> x;
     x.insert({1, 1});
-    ABSL_RAW_CHECK(x.find(0) == x.end(), "x should not contain 0");
+    CHECK(x.find(0) == x.end()) << "x should not contain 0";
     auto it = x.find(1);
-    ABSL_RAW_CHECK(it != x.end(), "x should contain 1");
-    ABSL_RAW_CHECK(it->second, "1 should map to 1");
+    CHECK(it != x.end()) << "x should contain 1";
+    CHECK(it->second) << "1 should map to 1";
   }
 };
 const BeforeMain before_main;
@@ -236,33 +236,36 @@
   // Erase all elements.
   {
     flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, [](std::pair<const int, int>) { return true; });
+    EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
     EXPECT_THAT(s, IsEmpty());
   }
   // Erase no elements.
   {
     flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, [](std::pair<const int, int>) { return false; });
+    EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
                                         Pair(4, 4), Pair(5, 5)));
   }
   // Erase specific elements.
   {
     flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s,
-             [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+    EXPECT_EQ(erase_if(s,
+                       [](std::pair<const int, int> kvp) {
+                         return kvp.first % 2 == 1;
+                       }),
+              3);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
   }
   // Predicate is function reference.
   {
     flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, FirstIsEven);
+    EXPECT_EQ(erase_if(s, FirstIsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
   }
   // Predicate is function pointer.
   {
     flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, &FirstIsEven);
+    EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
   }
 }
@@ -282,6 +285,40 @@
 }
 #endif
 
+TEST(FlatHashMap, Reserve) {
+  // Verify that if we reserve(size() + n) then we can perform n insertions
+  // without a rehash, i.e., without invalidating any references.
+  for (size_t trial = 0; trial < 20; ++trial) {
+    for (size_t initial = 3; initial < 100; ++initial) {
+      // Fill in `initial` entries, then erase 2 of them, then reserve space for
+      // two inserts and check for reference stability while doing the inserts.
+      flat_hash_map<size_t, size_t> map;
+      for (size_t i = 0; i < initial; ++i) {
+        map[i] = i;
+      }
+      map.erase(0);
+      map.erase(1);
+      map.reserve(map.size() + 2);
+      size_t& a2 = map[2];
+      // In the event of a failure, asan will complain in one of these two
+      // assignments.
+      map[initial] = a2;
+      map[initial + 1] = a2;
+      // Fail even when not under asan:
+      size_t& a2new = map[2];
+      EXPECT_EQ(&a2, &a2new);
+    }
+  }
+}
+
+TEST(FlatHashMap, RecursiveTypeCompiles) {
+  struct RecursiveType {
+    flat_hash_map<int, RecursiveType> m;
+  };
+  RecursiveType t;
+  t.m[0] = RecursiveType{};
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/flat_hash_set.h b/abseil-cpp/absl/container/flat_hash_set.h
index 81e145a..c789c7e 100644
--- a/abseil-cpp/absl/container/flat_hash_set.h
+++ b/abseil-cpp/absl/container/flat_hash_set.h
@@ -67,11 +67,15 @@
 //
 // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
 // fundamental and Abseil types that support the `absl::Hash` framework have a
-// compatible equality operator for comparing insertions into `flat_hash_map`.
+// compatible equality operator for comparing insertions into `flat_hash_set`.
 // If your type is not yet supported by the `absl::Hash` framework, see
 // absl/hash/hash.h for information on extending Abseil hashing to user-defined
 // types.
 //
+// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values
+// may be randomized across dynamically loaded libraries.
+//
 // NOTE: A `flat_hash_set` stores its keys directly inside its implementation
 // array to avoid memory indirection. Because a `flat_hash_set` is designed to
 // move data when rehashed, set keys will not retain pointer stability. If you
@@ -106,7 +110,7 @@
  public:
   // Constructors and Assignment Operators
   //
-  // A flat_hash_set supports the same overload set as `std::unordered_map`
+  // A flat_hash_set supports the same overload set as `std::unordered_set`
   // for construction and assignment:
   //
   // *  Default constructor
@@ -173,7 +177,7 @@
   // available within the `flat_hash_set`.
   //
   // NOTE: this member function is particular to `absl::flat_hash_set` and is
-  // not provided in the `std::unordered_map` API.
+  // not provided in the `std::unordered_set` API.
   using Base::capacity;
 
   // flat_hash_set::empty()
@@ -223,7 +227,11 @@
   // iterator erase(const_iterator first, const_iterator last):
   //
   //   Erases the elements in the range [`first`, `last`), returning an
-  //   iterator pointing to `last`.
+  //   iterator pointing to `last`. The special case of calling
+  //   `erase(begin(), end())` resets the reserved growth such that if
+  //   `reserve(N)` has previously been called and there has been no intervening
+  //   call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+  //   to assume that inserting N elements will not cause a rehash.
   //
   // size_type erase(const key_type& key):
   //
@@ -324,7 +332,7 @@
 
   // flat_hash_set::merge()
   //
-  // Extracts elements from a given `source` flat hash map into this
+  // Extracts elements from a given `source` flat hash set into this
   // `flat_hash_set`. If the destination `flat_hash_set` already contains an
   // element with an equivalent key, that element is not extracted.
   using Base::merge;
@@ -332,15 +340,15 @@
   // flat_hash_set::swap(flat_hash_set& other)
   //
   // Exchanges the contents of this `flat_hash_set` with those of the `other`
-  // flat hash map, avoiding invocation of any move, copy, or swap operations on
+  // flat hash set, avoiding invocation of any move, copy, or swap operations on
   // individual elements.
   //
   // All iterators and references on the `flat_hash_set` remain valid, excepting
   // for the past-the-end iterator, which is invalidated.
   //
   // `swap()` requires that the flat hash set's hashing and key equivalence
-  // functions be Swappable, and are exchaged using unqualified calls to
-  // non-member `swap()`. If the map's allocator has
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
   // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
   // set to `true`, the allocators are also exchanged using an unqualified call
   // to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -395,14 +403,14 @@
   // flat_hash_set::bucket_count()
   //
   // Returns the number of "buckets" within the `flat_hash_set`. Note that
-  // because a flat hash map contains all elements within its internal storage,
+  // because a flat hash set contains all elements within its internal storage,
   // this value simply equals the current capacity of the `flat_hash_set`.
   using Base::bucket_count;
 
   // flat_hash_set::load_factor()
   //
   // Returns the current load factor of the `flat_hash_set` (the average number
-  // of slots occupied with a value within the hash map).
+  // of slots occupied with a value within the hash set).
   using Base::load_factor;
 
   // flat_hash_set::max_load_factor()
@@ -443,9 +451,11 @@
 // erase_if(flat_hash_set<>, Pred)
 //
 // Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
 template <typename T, typename H, typename E, typename A, typename Predicate>
-void erase_if(flat_hash_set<T, H, E, A>& c, Predicate pred) {
-  container_internal::EraseIf(pred, &c);
+typename flat_hash_set<T, H, E, A>::size_type erase_if(
+    flat_hash_set<T, H, E, A>& c, Predicate pred) {
+  return container_internal::EraseIf(pred, &c);
 }
 
 namespace container_internal {
@@ -468,13 +478,6 @@
     absl::allocator_traits<Allocator>::destroy(*alloc, slot);
   }
 
-  template <class Allocator>
-  static void transfer(Allocator* alloc, slot_type* new_slot,
-                       slot_type* old_slot) {
-    construct(alloc, new_slot, std::move(*old_slot));
-    destroy(alloc, old_slot);
-  }
-
   static T& element(slot_type* slot) { return *slot; }
 
   template <class F, class... Args>
diff --git a/abseil-cpp/absl/container/flat_hash_set_test.cc b/abseil-cpp/absl/container/flat_hash_set_test.cc
index 8f6f994..20130f9 100644
--- a/abseil-cpp/absl/container/flat_hash_set_test.cc
+++ b/abseil-cpp/absl/container/flat_hash_set_test.cc
@@ -16,12 +16,12 @@
 
 #include <vector>
 
-#include "absl/base/internal/raw_logging.h"
 #include "absl/container/internal/hash_generator_testing.h"
 #include "absl/container/internal/unordered_set_constructor_test.h"
 #include "absl/container/internal/unordered_set_lookup_test.h"
 #include "absl/container/internal/unordered_set_members_test.h"
 #include "absl/container/internal/unordered_set_modifiers_test.h"
+#include "absl/log/check.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/string_view.h"
 
@@ -42,8 +42,8 @@
   BeforeMain() {
     absl::flat_hash_set<int> x;
     x.insert(1);
-    ABSL_RAW_CHECK(!x.contains(0), "x should not contain 0");
-    ABSL_RAW_CHECK(x.contains(1), "x should contain 1");
+    CHECK(!x.contains(0)) << "x should not contain 0";
+    CHECK(x.contains(1)) << "x should contain 1";
   }
 };
 const BeforeMain before_main;
@@ -143,31 +143,31 @@
   // Erase all elements.
   {
     flat_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, [](int) { return true; });
+    EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
     EXPECT_THAT(s, IsEmpty());
   }
   // Erase no elements.
   {
     flat_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, [](int) { return false; });
+    EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
     EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
   }
   // Erase specific elements.
   {
     flat_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, [](int k) { return k % 2 == 1; });
+    EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
     EXPECT_THAT(s, UnorderedElementsAre(2, 4));
   }
   // Predicate is function reference.
   {
     flat_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, IsEven);
+    EXPECT_EQ(erase_if(s, IsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
   }
   // Predicate is function pointer.
   {
     flat_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, &IsEven);
+    EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
   }
 }
diff --git a/abseil-cpp/absl/container/inlined_vector.h b/abseil-cpp/absl/container/inlined_vector.h
index 90bb96e..04e2c38 100644
--- a/abseil-cpp/absl/container/inlined_vector.h
+++ b/abseil-cpp/absl/container/inlined_vector.h
@@ -36,7 +36,6 @@
 #define ABSL_CONTAINER_INLINED_VECTOR_H_
 
 #include <algorithm>
-#include <cassert>
 #include <cstddef>
 #include <cstdlib>
 #include <cstring>
@@ -53,6 +52,7 @@
 #include "absl/base/port.h"
 #include "absl/container/internal/inlined_vector.h"
 #include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -72,37 +72,49 @@
 
   using Storage = inlined_vector_internal::Storage<T, N, A>;
 
-  using AllocatorTraits = typename Storage::AllocatorTraits;
-  using RValueReference = typename Storage::RValueReference;
-  using MoveIterator = typename Storage::MoveIterator;
-  using IsMemcpyOk = typename Storage::IsMemcpyOk;
+  template <typename TheA>
+  using AllocatorTraits = inlined_vector_internal::AllocatorTraits<TheA>;
+  template <typename TheA>
+  using MoveIterator = inlined_vector_internal::MoveIterator<TheA>;
+  template <typename TheA>
+  using IsMoveAssignOk = inlined_vector_internal::IsMoveAssignOk<TheA>;
 
-  template <typename Iterator>
+  template <typename TheA, typename Iterator>
   using IteratorValueAdapter =
-      typename Storage::template IteratorValueAdapter<Iterator>;
-  using CopyValueAdapter = typename Storage::CopyValueAdapter;
-  using DefaultValueAdapter = typename Storage::DefaultValueAdapter;
+      inlined_vector_internal::IteratorValueAdapter<TheA, Iterator>;
+  template <typename TheA>
+  using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter<TheA>;
+  template <typename TheA>
+  using DefaultValueAdapter =
+      inlined_vector_internal::DefaultValueAdapter<TheA>;
 
   template <typename Iterator>
   using EnableIfAtLeastForwardIterator = absl::enable_if_t<
-      inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+      inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
   template <typename Iterator>
   using DisableIfAtLeastForwardIterator = absl::enable_if_t<
-      !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+      !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
+
+  using MemcpyPolicy = typename Storage::MemcpyPolicy;
+  using ElementwiseAssignPolicy = typename Storage::ElementwiseAssignPolicy;
+  using ElementwiseConstructPolicy =
+      typename Storage::ElementwiseConstructPolicy;
+  using MoveAssignmentPolicy = typename Storage::MoveAssignmentPolicy;
 
  public:
-  using allocator_type = typename Storage::allocator_type;
-  using value_type = typename Storage::value_type;
-  using pointer = typename Storage::pointer;
-  using const_pointer = typename Storage::const_pointer;
-  using size_type = typename Storage::size_type;
-  using difference_type = typename Storage::difference_type;
-  using reference = typename Storage::reference;
-  using const_reference = typename Storage::const_reference;
-  using iterator = typename Storage::iterator;
-  using const_iterator = typename Storage::const_iterator;
-  using reverse_iterator = typename Storage::reverse_iterator;
-  using const_reverse_iterator = typename Storage::const_reverse_iterator;
+  using allocator_type = A;
+  using value_type = inlined_vector_internal::ValueType<A>;
+  using pointer = inlined_vector_internal::Pointer<A>;
+  using const_pointer = inlined_vector_internal::ConstPointer<A>;
+  using size_type = inlined_vector_internal::SizeType<A>;
+  using difference_type = inlined_vector_internal::DifferenceType<A>;
+  using reference = inlined_vector_internal::Reference<A>;
+  using const_reference = inlined_vector_internal::ConstReference<A>;
+  using iterator = inlined_vector_internal::Iterator<A>;
+  using const_iterator = inlined_vector_internal::ConstIterator<A>;
+  using reverse_iterator = inlined_vector_internal::ReverseIterator<A>;
+  using const_reverse_iterator =
+      inlined_vector_internal::ConstReverseIterator<A>;
 
   // ---------------------------------------------------------------------------
   // InlinedVector Constructors and Destructor
@@ -111,28 +123,28 @@
   // Creates an empty inlined vector with a value-initialized allocator.
   InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {}
 
-  // Creates an empty inlined vector with a copy of `alloc`.
-  explicit InlinedVector(const allocator_type& alloc) noexcept
-      : storage_(alloc) {}
+  // Creates an empty inlined vector with a copy of `allocator`.
+  explicit InlinedVector(const allocator_type& allocator) noexcept
+      : storage_(allocator) {}
 
   // Creates an inlined vector with `n` copies of `value_type()`.
   explicit InlinedVector(size_type n,
-                         const allocator_type& alloc = allocator_type())
-      : storage_(alloc) {
-    storage_.Initialize(DefaultValueAdapter(), n);
+                         const allocator_type& allocator = allocator_type())
+      : storage_(allocator) {
+    storage_.Initialize(DefaultValueAdapter<A>(), n);
   }
 
   // Creates an inlined vector with `n` copies of `v`.
   InlinedVector(size_type n, const_reference v,
-                const allocator_type& alloc = allocator_type())
-      : storage_(alloc) {
-    storage_.Initialize(CopyValueAdapter(v), n);
+                const allocator_type& allocator = allocator_type())
+      : storage_(allocator) {
+    storage_.Initialize(CopyValueAdapter<A>(std::addressof(v)), n);
   }
 
   // Creates an inlined vector with copies of the elements of `list`.
   InlinedVector(std::initializer_list<value_type> list,
-                const allocator_type& alloc = allocator_type())
-      : InlinedVector(list.begin(), list.end(), alloc) {}
+                const allocator_type& allocator = allocator_type())
+      : InlinedVector(list.begin(), list.end(), allocator) {}
 
   // Creates an inlined vector with elements constructed from the provided
   // forward iterator range [`first`, `last`).
@@ -141,38 +153,50 @@
   // this constructor with two integral arguments and a call to the above
   // `InlinedVector(size_type, const_reference)` constructor.
   template <typename ForwardIterator,
-            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+            EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
   InlinedVector(ForwardIterator first, ForwardIterator last,
-                const allocator_type& alloc = allocator_type())
-      : storage_(alloc) {
-    storage_.Initialize(IteratorValueAdapter<ForwardIterator>(first),
-                        std::distance(first, last));
+                const allocator_type& allocator = allocator_type())
+      : storage_(allocator) {
+    storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
+                        static_cast<size_t>(std::distance(first, last)));
   }
 
   // Creates an inlined vector with elements constructed from the provided input
   // iterator range [`first`, `last`).
   template <typename InputIterator,
-            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+            DisableIfAtLeastForwardIterator<InputIterator> = 0>
   InlinedVector(InputIterator first, InputIterator last,
-                const allocator_type& alloc = allocator_type())
-      : storage_(alloc) {
+                const allocator_type& allocator = allocator_type())
+      : storage_(allocator) {
     std::copy(first, last, std::back_inserter(*this));
   }
 
   // Creates an inlined vector by copying the contents of `other` using
   // `other`'s allocator.
   InlinedVector(const InlinedVector& other)
-      : InlinedVector(other, *other.storage_.GetAllocPtr()) {}
+      : InlinedVector(other, other.storage_.GetAllocator()) {}
 
-  // Creates an inlined vector by copying the contents of `other` using `alloc`.
-  InlinedVector(const InlinedVector& other, const allocator_type& alloc)
-      : storage_(alloc) {
-    if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
-      storage_.MemcpyFrom(other.storage_);
-    } else {
-      storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
-                          other.size());
+  // Creates an inlined vector by copying the contents of `other` using the
+  // provided `allocator`.
+  InlinedVector(const InlinedVector& other, const allocator_type& allocator)
+      : storage_(allocator) {
+    // Fast path: if the other vector is empty, there's nothing for us to do.
+    if (other.empty()) {
+      return;
     }
+
+    // Fast path: if the value type is trivially copy constructible, the
+    // allocator doesn't do anything fancy, and there is nothing on the heap,
+    // then we know it is legal for us to simply memcpy the other vector's
+    // inlined bytes to form our copy of its elements.
+    if (absl::is_trivially_copy_constructible<value_type>::value &&
+        std::is_same<A, std::allocator<value_type>>::value &&
+        !other.storage_.GetIsAllocated()) {
+      storage_.MemcpyFrom(other.storage_);
+      return;
+    }
+
+    storage_.InitFrom(other.storage_);
   }
 
   // Creates an inlined vector by moving in the contents of `other` without
@@ -192,55 +216,81 @@
   InlinedVector(InlinedVector&& other) noexcept(
       absl::allocator_is_nothrow<allocator_type>::value ||
       std::is_nothrow_move_constructible<value_type>::value)
-      : storage_(*other.storage_.GetAllocPtr()) {
-    if (IsMemcpyOk::value) {
+      : storage_(other.storage_.GetAllocator()) {
+    // Fast path: if the value type can be trivially relocated (i.e. moved from
+    // and destroyed), and we know the allocator doesn't do anything fancy, then
+    // it's safe for us to simply adopt the contents of the storage for `other`
+    // and remove its own reference to them. It's as if we had individually
+    // move-constructed each value and then destroyed the original.
+    if (absl::is_trivially_relocatable<value_type>::value &&
+        std::is_same<A, std::allocator<value_type>>::value) {
       storage_.MemcpyFrom(other.storage_);
-
       other.storage_.SetInlinedSize(0);
-    } else if (other.storage_.GetIsAllocated()) {
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+      return;
+    }
+
+    // Fast path: if the other vector is on the heap, we can simply take over
+    // its allocation.
+    if (other.storage_.GetIsAllocated()) {
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
       storage_.SetAllocatedSize(other.storage_.GetSize());
 
       other.storage_.SetInlinedSize(0);
-    } else {
-      IteratorValueAdapter<MoveIterator> other_values(
-          MoveIterator(other.storage_.GetInlinedData()));
-
-      inlined_vector_internal::ConstructElements(
-          storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values,
-          other.storage_.GetSize());
-
-      storage_.SetInlinedSize(other.storage_.GetSize());
+      return;
     }
+
+    // Otherwise we must move each element individually.
+    IteratorValueAdapter<A, MoveIterator<A>> other_values(
+        MoveIterator<A>(other.storage_.GetInlinedData()));
+
+    inlined_vector_internal::ConstructElements<A>(
+        storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+        other.storage_.GetSize());
+
+    storage_.SetInlinedSize(other.storage_.GetSize());
   }
 
   // Creates an inlined vector by moving in the contents of `other` with a copy
-  // of `alloc`.
+  // of `allocator`.
   //
-  // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other`
+  // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other`
   // contains allocated memory, this move constructor will still allocate. Since
   // allocation is performed, this constructor can only be `noexcept` if the
   // specified allocator is also `noexcept`.
-  InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept(
-      absl::allocator_is_nothrow<allocator_type>::value)
-      : storage_(alloc) {
-    if (IsMemcpyOk::value) {
+  InlinedVector(
+      InlinedVector&& other,
+      const allocator_type&
+          allocator) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+      : storage_(allocator) {
+    // Fast path: if the value type can be trivially relocated (i.e. moved from
+    // and destroyed), and we know the allocator doesn't do anything fancy, then
+    // it's safe for us to simply adopt the contents of the storage for `other`
+    // and remove its own reference to them. It's as if we had individually
+    // move-constructed each value and then destroyed the original.
+    if (absl::is_trivially_relocatable<value_type>::value &&
+        std::is_same<A, std::allocator<value_type>>::value) {
       storage_.MemcpyFrom(other.storage_);
-
       other.storage_.SetInlinedSize(0);
-    } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
-               other.storage_.GetIsAllocated()) {
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+      return;
+    }
+
+    // Fast path: if the other vector is on the heap and shares the same
+    // allocator, we can simply take over its allocation.
+    if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
+        other.storage_.GetIsAllocated()) {
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
       storage_.SetAllocatedSize(other.storage_.GetSize());
 
       other.storage_.SetInlinedSize(0);
-    } else {
-      storage_.Initialize(
-          IteratorValueAdapter<MoveIterator>(MoveIterator(other.data())),
-          other.size());
+      return;
     }
+
+    // Otherwise we must move each element individually.
+    storage_.Initialize(
+        IteratorValueAdapter<A, MoveIterator<A>>(MoveIterator<A>(other.data())),
+        other.size());
   }
 
   ~InlinedVector() {}
@@ -265,8 +315,10 @@
   size_type max_size() const noexcept {
     // One bit of the size storage is used to indicate whether the inlined
     // vector contains allocated memory. As a result, the maximum size that the
-    // inlined vector can express is half of the max for `size_type`.
-    return (std::numeric_limits<size_type>::max)() / 2;
+    // inlined vector can express is the smaller of the number of objects the
+    // allocator can allocate and std::numeric_limits<size_type>::max() / 2.
+    return (std::min)(AllocatorTraits<A>::max_size(storage_.GetAllocator()),
+                      (std::numeric_limits<size_type>::max)() / 2);
   }
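
The new bound is just the smaller of two limits; a standalone restatement over std::allocator_traits (a sketch, not code from this patch):

    #include <algorithm>
    #include <limits>
    #include <memory>

    // Smaller of what the allocator can hand out and half the range of its
    // size_type (one bit of the size field is the is-allocated flag).
    template <typename A>
    typename std::allocator_traits<A>::size_type MaxSizeSketch(const A& alloc) {
      using Traits = std::allocator_traits<A>;
      using size_type = typename Traits::size_type;
      return (std::min)(Traits::max_size(alloc),
                        (std::numeric_limits<size_type>::max)() / 2);
    }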
 
   // `InlinedVector::capacity()`
@@ -289,7 +341,7 @@
   // can be used to access and modify the contained elements.
   //
   // NOTE: only elements within [`data()`, `data() + size()`) are valid.
-  pointer data() noexcept {
+  pointer data() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
                                      : storage_.GetInlinedData();
   }
@@ -299,7 +351,7 @@
   // modify the contained elements.
   //
   // NOTE: only elements within [`data()`, `data() + size()`) are valid.
-  const_pointer data() const noexcept {
+  const_pointer data() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
                                      : storage_.GetInlinedData();
   }
@@ -307,14 +359,14 @@
   // `InlinedVector::operator[](...)`
   //
   // Returns a `reference` to the `i`th element of the inlined vector.
-  reference operator[](size_type i) {
+  reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(i < size());
     return data()[i];
   }
 
   // Overload of `InlinedVector::operator[](...)` that returns a
   // `const_reference` to the `i`th element of the inlined vector.
-  const_reference operator[](size_type i) const {
+  const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(i < size());
     return data()[i];
   }
@@ -325,7 +377,7 @@
   //
   // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
   // in both debug and non-debug builds, `std::out_of_range` will be thrown.
-  reference at(size_type i) {
+  reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ABSL_PREDICT_FALSE(i >= size())) {
       base_internal::ThrowStdOutOfRange(
           "`InlinedVector::at(size_type)` failed bounds check");
@@ -338,7 +390,7 @@
   //
   // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
   // in both debug and non-debug builds, `std::out_of_range` will be thrown.
-  const_reference at(size_type i) const {
+  const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ABSL_PREDICT_FALSE(i >= size())) {
       base_internal::ThrowStdOutOfRange(
           "`InlinedVector::at(size_type) const` failed bounds check");
@@ -349,14 +401,14 @@
   // `InlinedVector::front()`
   //
   // Returns a `reference` to the first element of the inlined vector.
-  reference front() {
+  reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[0];
   }
 
   // Overload of `InlinedVector::front()` that returns a `const_reference` to
   // the first element of the inlined vector.
-  const_reference front() const {
+  const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[0];
   }
@@ -364,14 +416,14 @@
   // `InlinedVector::back()`
   //
   // Returns a `reference` to the last element of the inlined vector.
-  reference back() {
+  reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[size() - 1];
   }
 
   // Overload of `InlinedVector::back()` that returns a `const_reference` to the
   // last element of the inlined vector.
-  const_reference back() const {
+  const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(!empty());
     return data()[size() - 1];
   }
@@ -379,68 +431,87 @@
   // `InlinedVector::begin()`
   //
   // Returns an `iterator` to the beginning of the inlined vector.
-  iterator begin() noexcept { return data(); }
+  iterator begin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
 
   // Overload of `InlinedVector::begin()` that returns a `const_iterator` to
   // the beginning of the inlined vector.
-  const_iterator begin() const noexcept { return data(); }
+  const_iterator begin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return data();
+  }
 
   // `InlinedVector::end()`
   //
   // Returns an `iterator` to the end of the inlined vector.
-  iterator end() noexcept { return data() + size(); }
+  iterator end() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return data() + size();
+  }
 
   // Overload of `InlinedVector::end()` that returns a `const_iterator` to the
   // end of the inlined vector.
-  const_iterator end() const noexcept { return data() + size(); }
+  const_iterator end() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return data() + size();
+  }
 
   // `InlinedVector::cbegin()`
   //
   // Returns a `const_iterator` to the beginning of the inlined vector.
-  const_iterator cbegin() const noexcept { return begin(); }
+  const_iterator cbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return begin();
+  }
 
   // `InlinedVector::cend()`
   //
   // Returns a `const_iterator` to the end of the inlined vector.
-  const_iterator cend() const noexcept { return end(); }
+  const_iterator cend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return end();
+  }
 
   // `InlinedVector::rbegin()`
   //
   // Returns a `reverse_iterator` from the end of the inlined vector.
-  reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+  reverse_iterator rbegin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return reverse_iterator(end());
+  }
 
   // Overload of `InlinedVector::rbegin()` that returns a
   // `const_reverse_iterator` from the end of the inlined vector.
-  const_reverse_iterator rbegin() const noexcept {
+  const_reverse_iterator rbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_reverse_iterator(end());
   }
 
   // `InlinedVector::rend()`
   //
   // Returns a `reverse_iterator` from the beginning of the inlined vector.
-  reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+  reverse_iterator rend() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return reverse_iterator(begin());
+  }
 
   // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator`
   // from the beginning of the inlined vector.
-  const_reverse_iterator rend() const noexcept {
+  const_reverse_iterator rend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_reverse_iterator(begin());
   }
 
   // `InlinedVector::crbegin()`
   //
   // Returns a `const_reverse_iterator` from the end of the inlined vector.
-  const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+  const_reverse_iterator crbegin() const noexcept
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return rbegin();
+  }
 
   // `InlinedVector::crend()`
   //
   // Returns a `const_reverse_iterator` from the beginning of the inlined
   // vector.
-  const_reverse_iterator crend() const noexcept { return rend(); }
+  const_reverse_iterator crend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return rend();
+  }
 
   // `InlinedVector::get_allocator()`
   //
   // Returns a copy of the inlined vector's allocator.
-  allocator_type get_allocator() const { return *storage_.GetAllocPtr(); }
+  allocator_type get_allocator() const { return storage_.GetAllocator(); }
 
   // ---------------------------------------------------------------------------
   // InlinedVector Member Mutators
@@ -474,18 +545,7 @@
   // unspecified state.
   InlinedVector& operator=(InlinedVector&& other) {
     if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
-      if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
-        inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
-                                                 size());
-        storage_.DeallocateIfAllocated();
-        storage_.MemcpyFrom(other.storage_);
-
-        other.storage_.SetInlinedSize(0);
-      } else {
-        storage_.Assign(IteratorValueAdapter<MoveIterator>(
-                            MoveIterator(other.storage_.GetInlinedData())),
-                        other.size());
-      }
+      MoveAssignment(MoveAssignmentPolicy{}, std::move(other));
     }
 
     return *this;
@@ -495,7 +555,7 @@
   //
   // Replaces the contents of the inlined vector with `n` copies of `v`.
   void assign(size_type n, const_reference v) {
-    storage_.Assign(CopyValueAdapter(v), n);
+    storage_.Assign(CopyValueAdapter<A>(std::addressof(v)), n);
   }
 
   // Overload of `InlinedVector::assign(...)` that replaces the contents of the
@@ -509,10 +569,10 @@
   //
   // NOTE: this overload is for iterators that are "forward" category or better.
   template <typename ForwardIterator,
-            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+            EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
   void assign(ForwardIterator first, ForwardIterator last) {
-    storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
-                    std::distance(first, last));
+    storage_.Assign(IteratorValueAdapter<A, ForwardIterator>(first),
+                    static_cast<size_t>(std::distance(first, last)));
   }
 
   // Overload of `InlinedVector::assign(...)` to replace the contents of the
@@ -520,7 +580,7 @@
   //
   // NOTE: this overload is for iterators that are "input" category.
   template <typename InputIterator,
-            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+            DisableIfAtLeastForwardIterator<InputIterator> = 0>
   void assign(InputIterator first, InputIterator last) {
     size_type i = 0;
     for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
@@ -539,7 +599,7 @@
   // is larger than `size()`, new elements are value-initialized.
   void resize(size_type n) {
     ABSL_HARDENING_ASSERT(n <= max_size());
-    storage_.Resize(DefaultValueAdapter(), n);
+    storage_.Resize(DefaultValueAdapter<A>(), n);
   }
 
   // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
@@ -549,33 +609,49 @@
   // is larger than `size()`, new elements are copy-constructed from `v`.
   void resize(size_type n, const_reference v) {
     ABSL_HARDENING_ASSERT(n <= max_size());
-    storage_.Resize(CopyValueAdapter(v), n);
+    storage_.Resize(CopyValueAdapter<A>(std::addressof(v)), n);
   }
 
   // `InlinedVector::insert(...)`
   //
   // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly
   // inserted element.
-  iterator insert(const_iterator pos, const_reference v) {
+  iterator insert(const_iterator pos,
+                  const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(pos, v);
   }
 
   // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
   // move semantics, returning an `iterator` to the newly inserted element.
-  iterator insert(const_iterator pos, RValueReference v) {
+  iterator insert(const_iterator pos,
+                  value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(pos, std::move(v));
   }
 
   // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies
   // of `v` starting at `pos`, returning an `iterator` pointing to the first of
   // the newly inserted elements.
-  iterator insert(const_iterator pos, size_type n, const_reference v) {
+  iterator insert(const_iterator pos, size_type n,
+                  const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos <= end());
 
     if (ABSL_PREDICT_TRUE(n != 0)) {
       value_type dealias = v;
-      return storage_.Insert(pos, CopyValueAdapter(dealias), n);
+      // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+      // It appears that GCC thinks that since `pos` is a const pointer and may
+      // point to uninitialized memory at this point, a warning should be
+      // issued. But `pos` is actually only used to compute an array index to
+      // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+      return storage_.Insert(pos, CopyValueAdapter<A>(std::addressof(dealias)),
+                             n);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
     } else {
       return const_cast<iterator>(pos);
     }
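
The `dealias` copy above exists because the reference may point into the vector itself; a short aliasing call that this keeps well-defined (usage sketch, not code from this patch):

    #include "absl/container/inlined_vector.h"

    void AliasingInsertExample() {
      absl::InlinedVector<int, 4> v = {1, 2, 3};
      // The value 3 is copied out before elements shift; v == {3, 3, 1, 2, 3}.
      v.insert(v.begin(), 2, v.back());
    }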
@@ -584,7 +660,8 @@
   // Overload of `InlinedVector::insert(...)` that inserts copies of the
   // elements of `list` starting at `pos`, returning an `iterator` pointing to
   // the first of the newly inserted elements.
-  iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+  iterator insert(const_iterator pos, std::initializer_list<value_type> list)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert(pos, list.begin(), list.end());
   }
 
@@ -594,15 +671,16 @@
   //
   // NOTE: this overload is for iterators that are "forward" category or better.
   template <typename ForwardIterator,
-            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+            EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
   iterator insert(const_iterator pos, ForwardIterator first,
-                  ForwardIterator last) {
+                  ForwardIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos <= end());
 
     if (ABSL_PREDICT_TRUE(first != last)) {
-      return storage_.Insert(pos, IteratorValueAdapter<ForwardIterator>(first),
-                             std::distance(first, last));
+      return storage_.Insert(
+          pos, IteratorValueAdapter<A, ForwardIterator>(first),
+          static_cast<size_type>(std::distance(first, last)));
     } else {
       return const_cast<iterator>(pos);
     }
@@ -614,12 +692,13 @@
   //
   // NOTE: this overload is for iterators that are "input" category.
   template <typename InputIterator,
-            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
-  iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+            DisableIfAtLeastForwardIterator<InputIterator> = 0>
+  iterator insert(const_iterator pos, InputIterator first,
+                  InputIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos <= end());
 
-    size_type index = std::distance(cbegin(), pos);
+    size_type index = static_cast<size_type>(std::distance(cbegin(), pos));
     for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
       insert(data() + i, *first);
     }
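
The single-pass loop above is the fallback for iterators whose distance cannot be precomputed; a stream-based call that exercises it (usage sketch, not code from this patch):

    #include <iterator>
    #include <sstream>
    #include "absl/container/inlined_vector.h"

    void InputIteratorInsertExample() {
      std::istringstream in("4 5 6");
      absl::InlinedVector<int, 8> v = {1, 2, 3};
      // std::istream_iterator is single-pass, so each element is inserted
      // individually; v == {1, 2, 3, 4, 5, 6} afterwards.
      v.insert(v.end(), std::istream_iterator<int>(in),
               std::istream_iterator<int>());
    }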
@@ -632,15 +711,28 @@
   // Constructs and inserts an element using `args...` in the inlined vector at
   // `pos`, returning an `iterator` pointing to the newly emplaced element.
   template <typename... Args>
-  iterator emplace(const_iterator pos, Args&&... args) {
+  iterator emplace(const_iterator pos,
+                   Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos <= end());
 
     value_type dealias(std::forward<Args>(args)...);
+    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+    // It appears that GCC thinks that since `pos` is a const pointer and may
+    // point to uninitialized memory at this point, a warning should be
+    // issued. But `pos` is actually only used to compute an array index to
+    // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
     return storage_.Insert(pos,
-                           IteratorValueAdapter<MoveIterator>(
-                               MoveIterator(std::addressof(dealias))),
+                           IteratorValueAdapter<A, MoveIterator<A>>(
+                               MoveIterator<A>(std::addressof(dealias))),
                            1);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
   }
 
   // `InlinedVector::emplace_back(...)`
@@ -648,7 +740,7 @@
   // Constructs and inserts an element using `args...` in the inlined vector at
   // `end()`, returning a `reference` to the newly emplaced element.
   template <typename... Args>
-  reference emplace_back(Args&&... args) {
+  reference emplace_back(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return storage_.EmplaceBack(std::forward<Args>(args)...);
   }
 
@@ -659,7 +751,7 @@
 
   // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
   // using move semantics.
-  void push_back(RValueReference v) {
+  void push_back(value_type&& v) {
     static_cast<void>(emplace_back(std::move(v)));
   }
 
@@ -669,7 +761,7 @@
   void pop_back() noexcept {
     ABSL_HARDENING_ASSERT(!empty());
 
-    AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
+    AllocatorTraits<A>::destroy(storage_.GetAllocator(), data() + (size() - 1));
     storage_.SubtractSize(1);
   }
 
@@ -678,8 +770,8 @@
   // Erases the element at `pos`, returning an `iterator` pointing to where the
   // erased element was located.
   //
-  // NOTE: may return `end()`, which is not dereferencable.
-  iterator erase(const_iterator pos) {
+  // NOTE: may return `end()`, which is not dereferenceable.
+  iterator erase(const_iterator pos) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos < end());
 
@@ -690,8 +782,9 @@
   // range [`from`, `to`), returning an `iterator` pointing to where the first
   // erased element was located.
   //
-  // NOTE: may return `end()`, which is not dereferencable.
-  iterator erase(const_iterator from, const_iterator to) {
+  // NOTE: may return `end()`, which is not dereferenceable.
+  iterator erase(const_iterator from,
+                 const_iterator to) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(from >= begin());
     ABSL_HARDENING_ASSERT(from <= to);
     ABSL_HARDENING_ASSERT(to <= end());
@@ -708,8 +801,8 @@
   // Destroys all elements in the inlined vector, setting the size to `0` and
   // deallocating any held memory.
   void clear() noexcept {
-    inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
-                                             size());
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
     storage_.DeallocateIfAllocated();
 
     storage_.SetInlinedSize(0);
@@ -722,15 +815,12 @@
 
   // `InlinedVector::shrink_to_fit()`
   //
-  // Reduces memory usage by freeing unused memory. After being called, calls to
-  // `capacity()` will be equal to `max(N, size())`.
+  // Attempts to reduce memory usage by moving elements to (or keeping elements
+  // in) the smallest available buffer sufficient for containing `size()`
+  // elements.
   //
-  // If `size() <= N` and the inlined vector contains allocated memory, the
-  // elements will all be moved to the inlined space and the allocated memory
-  // will be deallocated.
-  //
-  // If `size() > N` and `size() < capacity()`, the elements will be moved to a
-  // smaller allocation.
+  // If `size()` is sufficiently small, the elements will be moved into (or kept
+  // in) the inlined space.
   void shrink_to_fit() {
     if (storage_.GetIsAllocated()) {
       storage_.ShrinkToFit();
@@ -750,6 +840,73 @@
   template <typename H, typename TheT, size_t TheN, typename TheA>
   friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
 
+  void MoveAssignment(MemcpyPolicy, InlinedVector&& other) {
+    // Assumption check: we shouldn't be told to use memcpy to implement move
+    // assignment unless we have trivially destructible elements and an
+    // allocator that does nothing fancy.
+    static_assert(absl::is_trivially_destructible<value_type>::value, "");
+    static_assert(std::is_same<A, std::allocator<value_type>>::value, "");
+
+    // Throw away our existing heap allocation, if any. There is no need to
+    // destroy the existing elements one by one because we know they are
+    // trivially destructible.
+    storage_.DeallocateIfAllocated();
+
+    // Adopt the other vector's inline elements or heap allocation.
+    storage_.MemcpyFrom(other.storage_);
+    other.storage_.SetInlinedSize(0);
+  }
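
The MoveAssignmentPolicy passed in by operator= is defined in the internal Storage class and is not visible in this hunk; the following is a hypothetical, self-contained trait with the same shape as the assumption checks above, using local tag types in place of the real policy types (a sketch, not the actual selection logic):

    #include <memory>
    #include <type_traits>

    struct MemcpyPolicyTag {};       // stand-in for the real MemcpyPolicy
    struct ElementwisePolicyTag {};  // stand-in for the elementwise policies

    // Hypothetical selection mirroring the assumption checks above: memcpy is
    // only legal for trivially destructible elements with std::allocator.
    template <typename T, typename A>
    using MoveAssignPolicySketch = typename std::conditional<
        std::is_trivially_destructible<T>::value &&
            std::is_same<A, std::allocator<T>>::value,
        MemcpyPolicyTag, ElementwisePolicyTag>::type;

    static_assert(
        std::is_same<MoveAssignPolicySketch<int, std::allocator<int>>,
                     MemcpyPolicyTag>::value,
        "");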
+
+  // Destroy our existing elements, if any, and adopt the heap-allocated
+  // elements of the other vector.
+  //
+  // REQUIRES: other.storage_.GetIsAllocated()
+  void DestroyExistingAndAdopt(InlinedVector&& other) {
+    ABSL_HARDENING_ASSERT(other.storage_.GetIsAllocated());
+
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
+    storage_.DeallocateIfAllocated();
+
+    storage_.MemcpyFrom(other.storage_);
+    other.storage_.SetInlinedSize(0);
+  }
+
+  void MoveAssignment(ElementwiseAssignPolicy, InlinedVector&& other) {
+    // Fast path: if the other vector is on the heap then we don't worry about
+    // actually move-assigning each element. Instead we only throw away our own
+    // existing elements and adopt the heap allocation of the other vector.
+    if (other.storage_.GetIsAllocated()) {
+      DestroyExistingAndAdopt(std::move(other));
+      return;
+    }
+
+    storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+                        MoveIterator<A>(other.storage_.GetInlinedData())),
+                    other.size());
+  }
+
+  void MoveAssignment(ElementwiseConstructPolicy, InlinedVector&& other) {
+    // Fast path: if the other vector is on the heap then we don't worry about
+    // actually move-assigning each element. Instead we only throw away our own
+    // existing elements and adopt the heap allocation of the other vector.
+    if (other.storage_.GetIsAllocated()) {
+      DestroyExistingAndAdopt(std::move(other));
+      return;
+    }
+
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
+    storage_.DeallocateIfAllocated();
+
+    IteratorValueAdapter<A, MoveIterator<A>> other_values(
+        MoveIterator<A>(other.storage_.GetInlinedData()));
+    inlined_vector_internal::ConstructElements<A>(
+        storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+        other.storage_.GetSize());
+    storage_.SetInlinedSize(other.storage_.GetSize());
+  }
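
Whichever of these paths fires, the observable behavior is the one documented on the move-assignment operator above: the target takes over the elements and the source is left valid but unspecified. A usage sketch (not code from this patch):

    #include <string>
    #include <utility>
    #include "absl/container/inlined_vector.h"

    void MoveAssignExample() {
      absl::InlinedVector<std::string, 2> a = {"one", "two", "three"};  // heap-backed
      absl::InlinedVector<std::string, 2> b = {"x"};
      // The heap allocation is adopted wholesale; no per-element move occurs.
      b = std::move(a);
      // b.size() == 3; `a` is valid but in an unspecified state.
    }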
+
   Storage storage_;
 };
 
@@ -774,7 +931,7 @@
                 const absl::InlinedVector<T, N, A>& b) {
   auto a_data = a.data();
   auto b_data = b.data();
-  return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+  return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
 }
 
 // `operator!=(...)`
diff --git a/abseil-cpp/absl/container/inlined_vector_benchmark.cc b/abseil-cpp/absl/container/inlined_vector_benchmark.cc
index b8dafe9..5a04277 100644
--- a/abseil-cpp/absl/container/inlined_vector_benchmark.cc
+++ b/abseil-cpp/absl/container/inlined_vector_benchmark.cc
@@ -16,11 +16,11 @@
 #include <string>
 #include <vector>
 
-#include "benchmark/benchmark.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/inlined_vector.h"
 #include "absl/strings/str_cat.h"
+#include "benchmark/benchmark.h"
 
 namespace {
 
@@ -66,7 +66,7 @@
 BENCHMARK(BM_StdVectorFill)->Range(1, 256);
 
 // The purpose of the next two benchmarks is to verify that
-// absl::InlinedVector is efficient when moving is more efficent than
+// absl::InlinedVector is efficient when moving is more efficient than
 // copying. To do so, we use strings that are larger than the short
 // string optimization.
 bool StringRepresentedInline(std::string s) {
@@ -534,6 +534,28 @@
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
 
+// Measure cost of copy-constructor+destructor.
+void BM_CopyTrivial(benchmark::State& state) {
+  const int n = state.range(0);
+  InlVec<int64_t> src(n);
+  for (auto s : state) {
+    InlVec<int64_t> copy(src);
+    benchmark::DoNotOptimize(copy);
+  }
+}
+BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
+
+// Measure cost of copy-constructor+destructor.
+void BM_CopyNonTrivial(benchmark::State& state) {
+  const int n = state.range(0);
+  InlVec<InlVec<int64_t>> src(n);
+  for (auto s : state) {
+    InlVec<InlVec<int64_t>> copy(src);
+    benchmark::DoNotOptimize(copy);
+  }
+}
+BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
+
 template <typename T, size_t FromSize, size_t ToSize>
 void BM_AssignSizeRef(benchmark::State& state) {
   auto size = ToSize;
diff --git a/abseil-cpp/absl/container/inlined_vector_test.cc b/abseil-cpp/absl/container/inlined_vector_test.cc
index 415c60d..b9a79f5 100644
--- a/abseil-cpp/absl/container/inlined_vector_test.cc
+++ b/abseil-cpp/absl/container/inlined_vector_test.cc
@@ -15,25 +15,28 @@
 #include "absl/container/inlined_vector.h"
 
 #include <algorithm>
+#include <cstddef>
 #include <forward_list>
+#include <iterator>
 #include <list>
 #include <memory>
 #include <scoped_allocator>
 #include <sstream>
 #include <stdexcept>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
 #include "absl/base/internal/exception_testing.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/base/options.h"
 #include "absl/container/internal/counting_allocator.h"
 #include "absl/container/internal/test_instance_tracker.h"
 #include "absl/hash/hash_testing.h"
+#include "absl/log/check.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
 
@@ -49,14 +52,13 @@
 using testing::ElementsAreArray;
 using testing::Eq;
 using testing::Gt;
+using testing::Pointee;
+using testing::Pointwise;
 using testing::PrintToString;
+using testing::SizeIs;
 
 using IntVec = absl::InlinedVector<int, 8>;
 
-MATCHER_P(SizeIs, n, "") {
-  return testing::ExplainMatchResult(n, arg.size(), result_listener);
-}
-
 MATCHER_P(CapacityIs, n, "") {
   return testing::ExplainMatchResult(n, arg.capacity(), result_listener);
 }
@@ -101,13 +103,13 @@
   }
 
   void Ref() const {
-    ABSL_RAW_CHECK(count_ != nullptr, "");
+    CHECK_NE(count_, nullptr);
     ++(*count_);
   }
 
   void Unref() const {
     --(*count_);
-    ABSL_RAW_CHECK(*count_ >= 0, "");
+    CHECK_GE(*count_, 0);
   }
 
   int value_;
@@ -126,20 +128,20 @@
 
 // Append 0..len-1 to *v
 template <typename Container>
-static void Fill(Container* v, int len, int offset = 0) {
-  for (int i = 0; i < len; i++) {
-    v->push_back(i + offset);
+static void Fill(Container* v, size_t len, int offset = 0) {
+  for (size_t i = 0; i < len; i++) {
+    v->push_back(static_cast<int>(i) + offset);
   }
 }
 
-static IntVec Fill(int len, int offset = 0) {
+static IntVec Fill(size_t len, int offset = 0) {
   IntVec v;
   Fill(&v, len, offset);
   return v;
 }
 
 TEST(IntVec, SimpleOps) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v;
     const IntVec& cv = v;  // const alias
 
@@ -147,42 +149,42 @@
     EXPECT_EQ(len, v.size());
     EXPECT_LE(len, v.capacity());
 
-    for (int i = 0; i < len; i++) {
-      EXPECT_EQ(i, v[i]);
-      EXPECT_EQ(i, v.at(i));
+    for (size_t i = 0; i < len; i++) {
+      EXPECT_EQ(static_cast<int>(i), v[i]);
+      EXPECT_EQ(static_cast<int>(i), v.at(i));
     }
     EXPECT_EQ(v.begin(), v.data());
     EXPECT_EQ(cv.begin(), cv.data());
 
-    int counter = 0;
+    size_t counter = 0;
     for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) {
-      EXPECT_EQ(counter, *iter);
+      EXPECT_EQ(static_cast<int>(counter), *iter);
       counter++;
     }
     EXPECT_EQ(counter, len);
 
     counter = 0;
     for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) {
-      EXPECT_EQ(counter, *iter);
+      EXPECT_EQ(static_cast<int>(counter), *iter);
       counter++;
     }
     EXPECT_EQ(counter, len);
 
     counter = 0;
     for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) {
-      EXPECT_EQ(counter, *iter);
+      EXPECT_EQ(static_cast<int>(counter), *iter);
       counter++;
     }
     EXPECT_EQ(counter, len);
 
     if (len > 0) {
       EXPECT_EQ(0, v.front());
-      EXPECT_EQ(len - 1, v.back());
+      EXPECT_EQ(static_cast<int>(len - 1), v.back());
       v.pop_back();
       EXPECT_EQ(len - 1, v.size());
-      for (int i = 0; i < v.size(); ++i) {
-        EXPECT_EQ(i, v[i]);
-        EXPECT_EQ(i, v.at(i));
+      for (size_t i = 0; i < v.size(); ++i) {
+        EXPECT_EQ(static_cast<int>(i), v[i]);
+        EXPECT_EQ(static_cast<int>(i), v.at(i));
       }
     }
   }
@@ -191,7 +193,7 @@
 TEST(IntVec, PopBackNoOverflow) {
   IntVec v = {1};
   v.pop_back();
-  EXPECT_EQ(v.size(), 0);
+  EXPECT_EQ(v.size(), 0u);
 }
 
 TEST(IntVec, AtThrows) {
@@ -202,47 +204,47 @@
 }
 
 TEST(IntVec, ReverseIterator) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v;
     Fill(&v, len);
 
-    int counter = len;
+    size_t counter = len;
     for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) {
       counter--;
-      EXPECT_EQ(counter, *iter);
+      EXPECT_EQ(static_cast<int>(counter), *iter);
     }
-    EXPECT_EQ(counter, 0);
+    EXPECT_EQ(counter, 0u);
 
     counter = len;
     for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend();
          ++iter) {
       counter--;
-      EXPECT_EQ(counter, *iter);
+      EXPECT_EQ(static_cast<int>(counter), *iter);
     }
-    EXPECT_EQ(counter, 0);
+    EXPECT_EQ(counter, 0u);
 
     counter = len;
     for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend();
          ++iter) {
       counter--;
-      EXPECT_EQ(counter, *iter);
+      EXPECT_EQ(static_cast<int>(counter), *iter);
     }
-    EXPECT_EQ(counter, 0);
+    EXPECT_EQ(counter, 0u);
   }
 }
 
 TEST(IntVec, Erase) {
-  for (int len = 1; len < 20; len++) {
-    for (int i = 0; i < len; ++i) {
+  for (size_t len = 1; len < 20; len++) {
+    for (size_t i = 0; i < len; ++i) {
       IntVec v;
       Fill(&v, len);
       v.erase(v.begin() + i);
       EXPECT_EQ(len - 1, v.size());
-      for (int j = 0; j < i; ++j) {
-        EXPECT_EQ(j, v[j]);
+      for (size_t j = 0; j < i; ++j) {
+        EXPECT_EQ(static_cast<int>(j), v[j]);
       }
-      for (int j = i; j < len - 1; ++j) {
-        EXPECT_EQ(j + 1, v[j]);
+      for (size_t j = i; j < len - 1; ++j) {
+        EXPECT_EQ(static_cast<int>(j + 1), v[j]);
       }
     }
   }
@@ -254,51 +256,95 @@
   EXPECT_EQ(v[9], 9);
 #if !defined(NDEBUG) || ABSL_OPTION_HARDENED
   EXPECT_DEATH_IF_SUPPORTED(v[10], "");
-  EXPECT_DEATH_IF_SUPPORTED(v[-1], "");
+  EXPECT_DEATH_IF_SUPPORTED(v[static_cast<size_t>(-1)], "");
+  EXPECT_DEATH_IF_SUPPORTED(v.resize(v.max_size() + 1), "");
 #endif
 }
 
+// Move construction of a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+TEST(UniquePtr, MoveConstruct) {
+  for (size_t size = 0; size < 16; ++size) {
+    SCOPED_TRACE(size);
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+    for (size_t i = 0; i < size; ++i) {
+      a.push_back(std::make_unique<size_t>(i));
+    }
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> b(std::move(a));
+
+    ASSERT_THAT(b, SizeIs(size));
+    for (size_t i = 0; i < size; ++i) {
+      ASSERT_THAT(b[i], Pointee(i));
+    }
+  }
+}
+
+// Move assignment of a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+TEST(UniquePtr, MoveAssign) {
+  for (size_t size = 0; size < 16; ++size) {
+    SCOPED_TRACE(size);
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+    for (size_t i = 0; i < size; ++i) {
+      a.push_back(std::make_unique<size_t>(i));
+    }
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
+    b = std::move(a);
+
+    ASSERT_THAT(b, SizeIs(size));
+    for (size_t i = 0; i < size; ++i) {
+      ASSERT_THAT(b[i], Pointee(i));
+    }
+  }
+}
+
 // At the end of this test loop, the elements between [erase_begin, erase_end)
 // should have reference counts == 0, and all other elements should have
 // reference counts == 1.
 TEST(RefCountedVec, EraseBeginEnd) {
-  for (int len = 1; len < 20; ++len) {
-    for (int erase_begin = 0; erase_begin < len; ++erase_begin) {
-      for (int erase_end = erase_begin; erase_end <= len; ++erase_end) {
+  for (size_t len = 1; len < 20; ++len) {
+    for (size_t erase_begin = 0; erase_begin < len; ++erase_begin) {
+      for (size_t erase_end = erase_begin; erase_end <= len; ++erase_end) {
         std::vector<int> counts(len, 0);
         RefCountedVec v;
-        for (int i = 0; i < len; ++i) {
-          v.push_back(RefCounted(i, &counts[i]));
+        for (size_t i = 0; i < len; ++i) {
+          v.push_back(RefCounted(static_cast<int>(i), &counts[i]));
         }
 
-        int erase_len = erase_end - erase_begin;
+        size_t erase_len = erase_end - erase_begin;
 
         v.erase(v.begin() + erase_begin, v.begin() + erase_end);
 
         EXPECT_EQ(len - erase_len, v.size());
 
         // Check the elements before the first element erased.
-        for (int i = 0; i < erase_begin; ++i) {
-          EXPECT_EQ(i, v[i].value_);
+        for (size_t i = 0; i < erase_begin; ++i) {
+          EXPECT_EQ(static_cast<int>(i), v[i].value_);
         }
 
         // Check the elements after the first element erased.
-        for (int i = erase_begin; i < v.size(); ++i) {
-          EXPECT_EQ(i + erase_len, v[i].value_);
+        for (size_t i = erase_begin; i < v.size(); ++i) {
+          EXPECT_EQ(static_cast<int>(i + erase_len), v[i].value_);
         }
 
         // Check that the elements at the beginning are preserved.
-        for (int i = 0; i < erase_begin; ++i) {
+        for (size_t i = 0; i < erase_begin; ++i) {
           EXPECT_EQ(1, counts[i]);
         }
 
         // Check that the erased elements are destroyed
-        for (int i = erase_begin; i < erase_end; ++i) {
+        for (size_t i = erase_begin; i < erase_end; ++i) {
           EXPECT_EQ(0, counts[i]);
         }
 
         // Check that the elements at the end are preserved.
-        for (int i = erase_end; i < len; ++i) {
+        for (size_t i = erase_end; i < len; ++i) {
           EXPECT_EQ(1, counts[i]);
         }
       }
@@ -377,21 +423,21 @@
   absl::InlinedVector<std::pair<std::string, int>, 1> v;
 
   v.shrink_to_fit();
-  EXPECT_EQ(v.capacity(), 1);
+  EXPECT_EQ(v.capacity(), 1u);
 
   v.emplace_back("answer", 42);
   v.shrink_to_fit();
-  EXPECT_EQ(v.capacity(), 1);
+  EXPECT_EQ(v.capacity(), 1u);
 
   v.emplace_back("taxicab", 1729);
-  EXPECT_GE(v.capacity(), 2);
+  EXPECT_GE(v.capacity(), 2u);
   v.shrink_to_fit();
-  EXPECT_EQ(v.capacity(), 2);
+  EXPECT_EQ(v.capacity(), 2u);
 
   v.reserve(100);
-  EXPECT_GE(v.capacity(), 100);
+  EXPECT_GE(v.capacity(), 100u);
   v.shrink_to_fit();
-  EXPECT_EQ(v.capacity(), 2);
+  EXPECT_EQ(v.capacity(), 2u);
 }
 
 TEST(InlinedVectorTest, ShrinkToFitEdgeCases) {
@@ -399,10 +445,10 @@
     absl::InlinedVector<std::pair<std::string, int>, 1> v;
     v.emplace_back("answer", 42);
     v.emplace_back("taxicab", 1729);
-    EXPECT_GE(v.capacity(), 2);
+    EXPECT_GE(v.capacity(), 2u);
     v.pop_back();
     v.shrink_to_fit();
-    EXPECT_EQ(v.capacity(), 1);
+    EXPECT_EQ(v.capacity(), 1u);
     EXPECT_EQ(v[0].first, "answer");
     EXPECT_EQ(v[0].second, 42);
   }
@@ -411,34 +457,34 @@
     absl::InlinedVector<std::string, 2> v(100);
     v.resize(0);
     v.shrink_to_fit();
-    EXPECT_EQ(v.capacity(), 2);  // inlined capacity
+    EXPECT_EQ(v.capacity(), 2u);  // inlined capacity
   }
 
   {
     absl::InlinedVector<std::string, 2> v(100);
     v.resize(1);
     v.shrink_to_fit();
-    EXPECT_EQ(v.capacity(), 2);  // inlined capacity
+    EXPECT_EQ(v.capacity(), 2u);  // inlined capacity
   }
 
   {
     absl::InlinedVector<std::string, 2> v(100);
     v.resize(2);
     v.shrink_to_fit();
-    EXPECT_EQ(v.capacity(), 2);
+    EXPECT_EQ(v.capacity(), 2u);
   }
 
   {
     absl::InlinedVector<std::string, 2> v(100);
     v.resize(3);
     v.shrink_to_fit();
-    EXPECT_EQ(v.capacity(), 3);
+    EXPECT_EQ(v.capacity(), 3u);
   }
 }
 
 TEST(IntVec, Insert) {
-  for (int len = 0; len < 20; len++) {
-    for (int pos = 0; pos <= len; pos++) {
+  for (size_t len = 0; len < 20; len++) {
+    for (ptrdiff_t pos = 0; pos <= static_cast<ptrdiff_t>(len); pos++) {
       {
         // Single element
         std::vector<int> std_v;
@@ -526,16 +572,16 @@
 TEST(RefCountedVec, InsertConstructorDestructor) {
   // Make sure the proper construction/destruction happen during insert
   // operations.
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     SCOPED_TRACE(len);
-    for (int pos = 0; pos <= len; pos++) {
+    for (size_t pos = 0; pos <= len; pos++) {
       SCOPED_TRACE(pos);
       std::vector<int> counts(len, 0);
       int inserted_count = 0;
       RefCountedVec v;
-      for (int i = 0; i < len; ++i) {
+      for (size_t i = 0; i < len; ++i) {
         SCOPED_TRACE(i);
-        v.push_back(RefCounted(i, &counts[i]));
+        v.push_back(RefCounted(static_cast<int>(i), &counts[i]));
       }
 
       EXPECT_THAT(counts, Each(Eq(1)));
@@ -552,20 +598,20 @@
 }
 
 TEST(IntVec, Resize) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v;
     Fill(&v, len);
 
     // Try resizing up and down by k elements
     static const int kResizeElem = 1000000;
-    for (int k = 0; k < 10; k++) {
+    for (size_t k = 0; k < 10; k++) {
       // Enlarging resize
       v.resize(len + k, kResizeElem);
       EXPECT_EQ(len + k, v.size());
       EXPECT_LE(len + k, v.capacity());
-      for (int i = 0; i < len + k; i++) {
+      for (size_t i = 0; i < len + k; i++) {
         if (i < len) {
-          EXPECT_EQ(i, v[i]);
+          EXPECT_EQ(static_cast<int>(i), v[i]);
         } else {
           EXPECT_EQ(kResizeElem, v[i]);
         }
@@ -575,26 +621,26 @@
       v.resize(len, kResizeElem);
       EXPECT_EQ(len, v.size());
       EXPECT_LE(len, v.capacity());
-      for (int i = 0; i < len; i++) {
-        EXPECT_EQ(i, v[i]);
+      for (size_t i = 0; i < len; i++) {
+        EXPECT_EQ(static_cast<int>(i), v[i]);
       }
     }
   }
 }
 
 TEST(IntVec, InitWithLength) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v(len, 7);
     EXPECT_EQ(len, v.size());
     EXPECT_LE(len, v.capacity());
-    for (int i = 0; i < len; i++) {
+    for (size_t i = 0; i < len; i++) {
       EXPECT_EQ(7, v[i]);
     }
   }
 }
 
 TEST(IntVec, CopyConstructorAndAssignment) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v;
     Fill(&v, len);
     EXPECT_EQ(len, v.size());
@@ -603,7 +649,7 @@
     IntVec v2(v);
     EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2);
 
-    for (int start_len = 0; start_len < 20; start_len++) {
+    for (size_t start_len = 0; start_len < 20; start_len++) {
       IntVec v3;
       Fill(&v3, start_len, 99);  // Add dummy elements that should go away
       v3 = v;
@@ -613,7 +659,7 @@
 }
 
 TEST(IntVec, AliasingCopyAssignment) {
-  for (int len = 0; len < 20; ++len) {
+  for (size_t len = 0; len < 20; ++len) {
     IntVec original;
     Fill(&original, len);
     IntVec dup = original;
@@ -623,9 +669,9 @@
 }
 
 TEST(IntVec, MoveConstructorAndAssignment) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v_in;
-    const int inlined_capacity = v_in.capacity();
+    const size_t inlined_capacity = v_in.capacity();
     Fill(&v_in, len);
     EXPECT_EQ(len, v_in.size());
     EXPECT_LE(len, v_in.capacity());
@@ -642,7 +688,7 @@
         EXPECT_FALSE(v_out.data() == old_data);
       }
     }
-    for (int start_len = 0; start_len < 20; start_len++) {
+    for (size_t start_len = 0; start_len < 20; start_len++) {
       IntVec v_out;
       Fill(&v_out, start_len, 99);  // Add dummy elements that should go away
       IntVec v_temp(v_in);
@@ -681,10 +727,10 @@
 };
 
 TEST(AliasingTest, Emplace) {
-  for (int i = 2; i < 20; ++i) {
+  for (size_t i = 2; i < 20; ++i) {
     absl::InlinedVector<NotTriviallyDestructible, 10> vec;
-    for (int j = 0; j < i; ++j) {
-      vec.push_back(NotTriviallyDestructible(j));
+    for (size_t j = 0; j < i; ++j) {
+      vec.push_back(NotTriviallyDestructible(static_cast<int>(j)));
     }
     vec.emplace(vec.begin(), vec[0]);
     EXPECT_EQ(vec[0], vec[1]);
@@ -696,12 +742,12 @@
 }
 
 TEST(AliasingTest, InsertWithCount) {
-  for (int i = 1; i < 20; ++i) {
+  for (size_t i = 1; i < 20; ++i) {
     absl::InlinedVector<NotTriviallyDestructible, 10> vec;
-    for (int j = 0; j < i; ++j) {
-      vec.push_back(NotTriviallyDestructible(j));
+    for (size_t j = 0; j < i; ++j) {
+      vec.push_back(NotTriviallyDestructible(static_cast<int>(j)));
     }
-    for (int n = 0; n < 5; ++n) {
+    for (size_t n = 0; n < 5; ++n) {
       // We use back where we can because it's guaranteed to become invalidated
       vec.insert(vec.begin(), n, vec.back());
       auto b = vec.begin();
@@ -736,41 +782,45 @@
   // In particular, ensure that std::allocator doesn't cost anything to store.
   // The union should be absorbing some of the allocation bookkeeping overhead
   // in the larger vectors, leaving only the size_ field as overhead.
-  EXPECT_EQ(2 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*));
+
+  struct T { void* val; };
+  size_t expected_overhead = sizeof(T);
+
+  EXPECT_EQ((2 * expected_overhead),
+            sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
 }
 
 TEST(IntVec, Clear) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     SCOPED_TRACE(len);
     IntVec v;
     Fill(&v, len);
     v.clear();
-    EXPECT_EQ(0, v.size());
+    EXPECT_EQ(0u, v.size());
     EXPECT_EQ(v.begin(), v.end());
   }
 }
 
 TEST(IntVec, Reserve) {
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     IntVec v;
     Fill(&v, len);
 
-    for (int newlen = 0; newlen < 100; newlen++) {
+    for (size_t newlen = 0; newlen < 100; newlen++) {
       const int* start_rep = v.data();
       v.reserve(newlen);
       const int* final_rep = v.data();
@@ -837,9 +887,9 @@
 }
 
 TEST(IntVec, Swap) {
-  for (int l1 = 0; l1 < 20; l1++) {
+  for (size_t l1 = 0; l1 < 20; l1++) {
     SCOPED_TRACE(l1);
-    for (int l2 = 0; l2 < 20; l2++) {
+    for (size_t l2 = 0; l2 < 20; l2++) {
       SCOPED_TRACE(l2);
       IntVec a = Fill(l1, 0);
       IntVec b = Fill(l2, 100);
@@ -849,13 +899,13 @@
       }
       EXPECT_EQ(l1, b.size());
       EXPECT_EQ(l2, a.size());
-      for (int i = 0; i < l1; i++) {
+      for (size_t i = 0; i < l1; i++) {
         SCOPED_TRACE(i);
-        EXPECT_EQ(i, b[i]);
+        EXPECT_EQ(static_cast<int>(i), b[i]);
       }
-      for (int i = 0; i < l2; i++) {
+      for (size_t i = 0; i < l2; i++) {
         SCOPED_TRACE(i);
-        EXPECT_EQ(100 + i, a[i]);
+        EXPECT_EQ(100 + static_cast<int>(i), a[i]);
       }
     }
   }
@@ -864,46 +914,48 @@
 TYPED_TEST_P(InstanceTest, Swap) {
   using Instance = TypeParam;
   using InstanceVec = absl::InlinedVector<Instance, 8>;
-  for (int l1 = 0; l1 < 20; l1++) {
+  for (size_t l1 = 0; l1 < 20; l1++) {
     SCOPED_TRACE(l1);
-    for (int l2 = 0; l2 < 20; l2++) {
+    for (size_t l2 = 0; l2 < 20; l2++) {
       SCOPED_TRACE(l2);
       InstanceTracker tracker;
       InstanceVec a, b;
       const size_t inlined_capacity = a.capacity();
       auto min_len = std::min(l1, l2);
       auto max_len = std::max(l1, l2);
-      for (int i = 0; i < l1; i++) a.push_back(Instance(i));
-      for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
-      EXPECT_EQ(tracker.instances(), l1 + l2);
+      for (size_t i = 0; i < l1; i++)
+        a.push_back(Instance(static_cast<int>(i)));
+      for (size_t i = 0; i < l2; i++)
+        b.push_back(Instance(100 + static_cast<int>(i)));
+      EXPECT_EQ(tracker.instances(), static_cast<int>(l1 + l2));
       tracker.ResetCopiesMovesSwaps();
       {
         using std::swap;
         swap(a, b);
       }
-      EXPECT_EQ(tracker.instances(), l1 + l2);
+      EXPECT_EQ(tracker.instances(), static_cast<int>(l1 + l2));
       if (a.size() > inlined_capacity && b.size() > inlined_capacity) {
         EXPECT_EQ(tracker.swaps(), 0);  // Allocations are swapped.
         EXPECT_EQ(tracker.moves(), 0);
       } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) {
-        EXPECT_EQ(tracker.swaps(), min_len);
+        EXPECT_EQ(tracker.swaps(), static_cast<int>(min_len));
         EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
-                  max_len - min_len);
+                  static_cast<int>(max_len - min_len));
       } else {
         // One is allocated and the other isn't. The allocation is transferred
         // without copying elements, and the inlined instances are copied/moved.
         EXPECT_EQ(tracker.swaps(), 0);
         EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
-                  min_len);
+                  static_cast<int>(min_len));
       }
 
       EXPECT_EQ(l1, b.size());
       EXPECT_EQ(l2, a.size());
-      for (int i = 0; i < l1; i++) {
-        EXPECT_EQ(i, b[i].value());
+      for (size_t i = 0; i < l1; i++) {
+        EXPECT_EQ(static_cast<int>(i), b[i].value());
       }
-      for (int i = 0; i < l2; i++) {
-        EXPECT_EQ(100 + i, a[i].value());
+      for (size_t i = 0; i < l2; i++) {
+        EXPECT_EQ(100 + static_cast<int>(i), a[i].value());
       }
     }
   }
@@ -932,9 +984,9 @@
 
   a.clear();
   b.clear();
-  for (int i = 0; i < 100; i++) {
-    a.push_back(i);
-    b.push_back(i);
+  for (size_t i = 0; i < 100; i++) {
+    a.push_back(static_cast<int>(i));
+    b.push_back(static_cast<int>(i));
     EXPECT_TRUE(a == b);
     EXPECT_FALSE(a != b);
 
@@ -973,26 +1025,26 @@
   using Instance = TypeParam;
   using InstanceVec = absl::InlinedVector<Instance, 8>;
   InstanceTracker tracker;
-  for (int len = 0; len < 20; len++) {
+  for (size_t len = 0; len < 20; len++) {
     SCOPED_TRACE(len);
     tracker.ResetCopiesMovesSwaps();
 
     InstanceVec v;
     const size_t inlined_capacity = v.capacity();
-    for (int i = 0; i < len; i++) {
-      v.push_back(Instance(i));
+    for (size_t i = 0; i < len; i++) {
+      v.push_back(Instance(static_cast<int>(i)));
     }
-    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_EQ(tracker.instances(), static_cast<int>(len));
     EXPECT_GE(tracker.copies() + tracker.moves(),
-              len);  // More due to reallocation.
+              static_cast<int>(len));  // More due to reallocation.
     tracker.ResetCopiesMovesSwaps();
 
     // Enlarging resize() must construct some objects
     tracker.ResetCopiesMovesSwaps();
     v.resize(len + 10, Instance(100));
-    EXPECT_EQ(tracker.instances(), len + 10);
+    EXPECT_EQ(tracker.instances(), static_cast<int>(len) + 10);
     if (len <= inlined_capacity && len + 10 > inlined_capacity) {
-      EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len);
+      EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + static_cast<int>(len));
     } else {
       // Only specify a minimum number of copies + moves. We don't want to
       // depend on the reallocation policy here.
@@ -1003,29 +1055,30 @@
     // Shrinking resize() must destroy some objects
     tracker.ResetCopiesMovesSwaps();
     v.resize(len, Instance(100));
-    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_EQ(tracker.instances(), static_cast<int>(len));
     EXPECT_EQ(tracker.copies(), 0);
     EXPECT_EQ(tracker.moves(), 0);
 
     // reserve() must not increase the number of initialized objects
     SCOPED_TRACE("reserve");
     v.reserve(len + 1000);
-    EXPECT_EQ(tracker.instances(), len);
-    EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+    EXPECT_EQ(tracker.instances(), static_cast<int>(len));
+    EXPECT_EQ(tracker.copies() + tracker.moves(), static_cast<int>(len));
 
     // pop_back() and erase() must destroy one object
     if (len > 0) {
       tracker.ResetCopiesMovesSwaps();
       v.pop_back();
-      EXPECT_EQ(tracker.instances(), len - 1);
+      EXPECT_EQ(tracker.instances(), static_cast<int>(len) - 1);
       EXPECT_EQ(tracker.copies(), 0);
       EXPECT_EQ(tracker.moves(), 0);
 
       if (!v.empty()) {
         tracker.ResetCopiesMovesSwaps();
         v.erase(v.begin());
-        EXPECT_EQ(tracker.instances(), len - 2);
-        EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2);
+        EXPECT_EQ(tracker.instances(), static_cast<int>(len) - 2);
+        EXPECT_EQ(tracker.copies() + tracker.moves(),
+                  static_cast<int>(len) - 2);
       }
     }
 
@@ -1082,12 +1135,12 @@
     tracker.ResetCopiesMovesSwaps();
     {
       InstanceVec v_copy(std::move(v));
-      if (len > inlined_capacity) {
+      if (static_cast<size_t>(len) > inlined_capacity) {
         // Allocation is moved as a whole.
         EXPECT_EQ(tracker.instances(), len);
         EXPECT_EQ(tracker.live_instances(), len);
         // Tests an implementation detail, don't rely on this in your code.
-        EXPECT_EQ(v.size(), 0);  // NOLINT misc-use-after-move
+        EXPECT_EQ(v.size(), 0u);  // NOLINT misc-use-after-move
         EXPECT_EQ(tracker.copies(), 0);
         EXPECT_EQ(tracker.moves(), 0);
       } else {
@@ -1153,7 +1206,7 @@
       tracker.ResetCopiesMovesSwaps();
 
       InstanceVec longer, shorter;
-      const int inlined_capacity = longer.capacity();
+      const size_t inlined_capacity = longer.capacity();
       for (int i = 0; i < len; i++) {
         longer.push_back(Instance(i));
         shorter.push_back(Instance(i));
@@ -1172,7 +1225,7 @@
         src_len = len;
         longer = std::move(shorter);
       }
-      if (src_len > inlined_capacity) {
+      if (static_cast<size_t>(src_len) > inlined_capacity) {
         // Allocation moved as a whole.
         EXPECT_EQ(tracker.instances(), src_len);
         EXPECT_EQ(tracker.live_instances(), src_len);
@@ -1197,6 +1250,8 @@
 }
 
 TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
+  const size_t inlined_capacity = absl::InlinedVector<int, 2>().capacity();
+
   for (size_t original_size = 0; original_size <= 5; ++original_size) {
     SCOPED_TRACE(original_size);
     // Original contents are [12345, 12345, ...]
@@ -1205,10 +1260,10 @@
     absl::InlinedVector<int, 2> v(original_contents.begin(),
                                   original_contents.end());
     v.assign(2, 123);
-    EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123)));
-    if (original_size <= 2) {
+    EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(123, 123)));
+    if (original_size <= inlined_capacity) {
       // If the original had inline backing, it should stay inline.
-      EXPECT_EQ(2, v.capacity());
+      EXPECT_EQ(v.capacity(), inlined_capacity);
     }
   }
 }
@@ -1222,7 +1277,7 @@
     absl::InlinedVector<int, 2> v(original_contents.begin(),
                                   original_contents.end());
     v.assign(3, 123);
-    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123)));
+    EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(123, 123, 123)));
     EXPECT_LE(v.size(), v.capacity());
   }
 }
@@ -1237,10 +1292,10 @@
     absl::InlinedVector<Instance, 2> v(original_contents.begin(),
                                        original_contents.end());
     v.assign(2, Instance(123));
-    EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123))));
+    EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(ValueIs(123), ValueIs(123))));
     if (original_size <= 2) {
       // If the original had inline backing, it should stay inline.
-      EXPECT_EQ(2, v.capacity());
+      EXPECT_EQ(2u, v.capacity());
     }
   }
 }
@@ -1255,8 +1310,8 @@
     absl::InlinedVector<Instance, 2> v(original_contents.begin(),
                                        original_contents.end());
     v.assign(3, Instance(123));
-    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
-                                                ValueIs(123))));
+    EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(ValueIs(123), ValueIs(123),
+                                                 ValueIs(123))));
     EXPECT_LE(v.size(), v.capacity());
   }
 }
@@ -1271,16 +1326,17 @@
   std::vector<int> source_v = {4, 5, 6};
   // First try to fit in inline backing
   absl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
-  EXPECT_EQ(3, v.size());
-  EXPECT_EQ(4, v.capacity());  // Indication that we're still on inlined storage
+  EXPECT_EQ(3u, v.size());
+  EXPECT_EQ(4u,
+            v.capacity());  // Indication that we're still on inlined storage
   EXPECT_EQ(4, v[0]);
   EXPECT_EQ(5, v[1]);
   EXPECT_EQ(6, v[2]);
 
   // Now, force a reallocation
   absl::InlinedVector<int, 2> realloc_v(source_v.begin(), source_v.end());
-  EXPECT_EQ(3, realloc_v.size());
-  EXPECT_LT(2, realloc_v.capacity());
+  EXPECT_EQ(3u, realloc_v.size());
+  EXPECT_LT(2u, realloc_v.capacity());
   EXPECT_EQ(4, realloc_v[0]);
   EXPECT_EQ(5, realloc_v[1]);
   EXPECT_EQ(6, realloc_v[2]);
@@ -1295,8 +1351,8 @@
   tracker.ResetCopiesMovesSwaps();
   absl::InlinedVector<Instance, inlined_capacity> v(source_v.begin(),
                                                     source_v.end());
-  EXPECT_EQ(2, v.size());
-  EXPECT_LT(1, v.capacity());
+  EXPECT_EQ(2u, v.size());
+  EXPECT_LT(1u, v.capacity());
   EXPECT_EQ(0, v[0].value());
   EXPECT_EQ(1, v[1].value());
   EXPECT_EQ(tracker.copies(), 2);
@@ -1348,6 +1404,8 @@
 }
 
 TEST(RangedAssign, SimpleType) {
+  const size_t inlined_capacity = absl::InlinedVector<int, 3>().capacity();
+
   // Test for all combinations of original sizes (empty and non-empty inline,
   // and out of line) and target sizes.
   for (size_t original_size = 0; original_size <= 5; ++original_size) {
@@ -1361,7 +1419,7 @@
       // New contents are [3, 4, ...]
       std::vector<int> new_contents;
       for (size_t i = 0; i < target_size; ++i) {
-        new_contents.push_back(i + 3);
+        new_contents.push_back(static_cast<int>(i + 3));
       }
 
       absl::InlinedVector<int, 3> v(original_contents.begin(),
@@ -1370,9 +1428,10 @@
 
       EXPECT_EQ(new_contents.size(), v.size());
       EXPECT_LE(new_contents.size(), v.capacity());
-      if (target_size <= 3 && original_size <= 3) {
+      if (target_size <= inlined_capacity &&
+          original_size <= inlined_capacity) {
         // Storage should stay inline when target size is small.
-        EXPECT_EQ(3, v.capacity());
+        EXPECT_EQ(v.capacity(), inlined_capacity);
       }
       EXPECT_THAT(v, ElementsAreArray(new_contents));
     }
@@ -1405,7 +1464,7 @@
       // TODO(bsamwel): Test with an input iterator.
       std::vector<Instance> new_contents_in;
       for (size_t i = 0; i < target_size; ++i) {
-        new_contents_in.push_back(Instance(i + 3));
+        new_contents_in.push_back(Instance(static_cast<int>(i) + 3));
       }
       SourceContainer new_contents(new_contents_in.begin(),
                                    new_contents_in.end());
@@ -1418,7 +1477,7 @@
       EXPECT_LE(new_contents.size(), v.capacity());
       if (target_size <= 3 && original_size <= 3) {
         // Storage should stay inline when target size is small.
-        EXPECT_EQ(3, v.capacity());
+        EXPECT_EQ(3u, v.capacity());
       }
       EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(),
                              InstanceValuesEqual<Instance>));
@@ -1442,12 +1501,12 @@
 
 TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
   EXPECT_THAT((absl::InlinedVector<int, 4>{4, 5, 6}),
-              AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6)));
+              AllOf(SizeIs(3u), CapacityIs(4u), ElementsAre(4, 5, 6)));
 }
 
 TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
   EXPECT_THAT((absl::InlinedVector<int, 2>{4, 5, 6}),
-              AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6)));
+              AllOf(SizeIs(3u), CapacityIs(Gt(2u)), ElementsAre(4, 5, 6)));
 }
 
 TEST(InitializerListConstructor, DisparateTypesInList) {
@@ -1458,16 +1517,19 @@
 }
 
 TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
-  EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
-                  CopyableMovableInstance(0)}),
-              AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0))));
+  const size_t inlined_capacity =
+      absl::InlinedVector<CopyableMovableInstance, 1>().capacity();
+  EXPECT_THAT(
+      (absl::InlinedVector<CopyableMovableInstance, 1>{
+          CopyableMovableInstance(0)}),
+      AllOf(SizeIs(1u), CapacityIs(inlined_capacity), ElementsAre(ValueIs(0))));
 }
 
 TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
-  EXPECT_THAT(
-      (absl::InlinedVector<CopyableMovableInstance, 1>{
-          CopyableMovableInstance(0), CopyableMovableInstance(1)}),
-      AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1))));
+  EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
+                  CopyableMovableInstance(0), CopyableMovableInstance(1)}),
+              AllOf(SizeIs(2u), CapacityIs(Gt(1u)),
+                    ElementsAre(ValueIs(0), ValueIs(1))));
 }
 
 TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) {
@@ -1477,14 +1539,14 @@
     absl::InlinedVector<int, 2> v1(original_size, 12345);
     const size_t original_capacity_v1 = v1.capacity();
     v1.assign({3});
-    EXPECT_THAT(
-        v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3)));
+    EXPECT_THAT(v1, AllOf(SizeIs(1u), CapacityIs(original_capacity_v1),
+                          ElementsAre(3)));
 
     absl::InlinedVector<int, 2> v2(original_size, 12345);
     const size_t original_capacity_v2 = v2.capacity();
     v2 = {3};
-    EXPECT_THAT(
-        v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3)));
+    EXPECT_THAT(v2, AllOf(SizeIs(1u), CapacityIs(original_capacity_v2),
+                          ElementsAre(3)));
   }
 }
 
@@ -1493,13 +1555,13 @@
     SCOPED_TRACE(original_size);
     absl::InlinedVector<int, 2> v1(original_size, 12345);
     v1.assign({3, 4, 5});
-    EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
-    EXPECT_LE(3, v1.capacity());
+    EXPECT_THAT(v1, AllOf(SizeIs(3u), ElementsAre(3, 4, 5)));
+    EXPECT_LE(3u, v1.capacity());
 
     absl::InlinedVector<int, 2> v2(original_size, 12345);
     v2 = {3, 4, 5};
-    EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
-    EXPECT_LE(3, v2.capacity());
+    EXPECT_THAT(v2, AllOf(SizeIs(3u), ElementsAre(3, 4, 5)));
+    EXPECT_LE(3u, v2.capacity());
   }
 }
 
@@ -1528,7 +1590,7 @@
     absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
     const size_t original_capacity = v.capacity();
     v.assign({Instance(3)});
-    EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity),
+    EXPECT_THAT(v, AllOf(SizeIs(1u), CapacityIs(original_capacity),
                          ElementsAre(ValueIs(3))));
   }
   for (size_t original_size = 0; original_size <= 4; ++original_size) {
@@ -1536,28 +1598,53 @@
     absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
     v.assign({Instance(3), Instance(4), Instance(5)});
     EXPECT_THAT(
-        v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
-    EXPECT_LE(3, v.capacity());
+        v, AllOf(SizeIs(3u), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+    EXPECT_LE(3u, v.capacity());
   }
 }
 
-REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors,
-                           CountConstructorsDestructorsOnCopyConstruction,
-                           CountConstructorsDestructorsOnMoveConstruction,
-                           CountConstructorsDestructorsOnAssignment,
-                           CountConstructorsDestructorsOnMoveAssignment,
-                           CountElemAssignInlineBacking, RangedConstructor,
-                           RangedAssign, InitializerListAssign);
+REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors,
+                            CountConstructorsDestructorsOnCopyConstruction,
+                            CountConstructorsDestructorsOnMoveConstruction,
+                            CountConstructorsDestructorsOnAssignment,
+                            CountConstructorsDestructorsOnMoveAssignment,
+                            CountElemAssignInlineBacking, RangedConstructor,
+                            RangedAssign, InitializerListAssign);
 
 using InstanceTypes =
     ::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
-INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest,
+                               InstanceTypes);
 
 TEST(DynamicVec, DynamicVecCompiles) {
   DynamicVec v;
   (void)v;
 }
 
+TEST(DynamicVec, CreateNonEmptyDynamicVec) {
+  DynamicVec v(1);
+  EXPECT_EQ(v.size(), 1u);
+}
+
+TEST(DynamicVec, EmplaceBack) {
+  DynamicVec v;
+  v.emplace_back(Dynamic{});
+  EXPECT_EQ(v.size(), 1u);
+}
+
+TEST(DynamicVec, EmplaceBackAfterHeapAllocation) {
+  DynamicVec v;
+  v.reserve(10);
+  v.emplace_back(Dynamic{});
+  EXPECT_EQ(v.size(), 1u);
+}
+
+TEST(DynamicVec, EmptyIteratorComparison) {
+  DynamicVec v;
+  EXPECT_EQ(v.begin(), v.end());
+  EXPECT_EQ(v.cbegin(), v.cend());
+}
+
 TEST(AllocatorSupportTest, Constructors) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
@@ -1582,54 +1669,54 @@
   MyAlloc alloc(&allocated);
   {
     AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc);
-    EXPECT_THAT(allocated, 0);
+    EXPECT_THAT(allocated, Eq(0));
   }
-  EXPECT_THAT(allocated, 0);
+  EXPECT_THAT(allocated, Eq(0));
   {
     AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
-    EXPECT_THAT(allocated, v.size() * sizeof(int));
+    EXPECT_THAT(allocated, Eq(static_cast<int64_t>(v.size() * sizeof(int))));
   }
-  EXPECT_THAT(allocated, 0);
+  EXPECT_THAT(allocated, Eq(0));
   {
     AllocVec v(4, 1, alloc);
-    EXPECT_THAT(allocated, 0);
+    EXPECT_THAT(allocated, Eq(0));
 
     int64_t allocated2 = 0;
     MyAlloc alloc2(&allocated2);
     AllocVec v2(v, alloc2);
-    EXPECT_THAT(allocated2, 0);
+    EXPECT_THAT(allocated2, Eq(0));
 
     int64_t allocated3 = 0;
     MyAlloc alloc3(&allocated3);
     AllocVec v3(std::move(v), alloc3);
-    EXPECT_THAT(allocated3, 0);
+    EXPECT_THAT(allocated3, Eq(0));
   }
   EXPECT_THAT(allocated, 0);
   {
     AllocVec v(8, 2, alloc);
-    EXPECT_THAT(allocated, v.size() * sizeof(int));
+    EXPECT_THAT(allocated, Eq(static_cast<int64_t>(v.size() * sizeof(int))));
 
     int64_t allocated2 = 0;
     MyAlloc alloc2(&allocated2);
     AllocVec v2(v, alloc2);
-    EXPECT_THAT(allocated2, v2.size() * sizeof(int));
+    EXPECT_THAT(allocated2, Eq(static_cast<int64_t>(v2.size() * sizeof(int))));
 
     int64_t allocated3 = 0;
     MyAlloc alloc3(&allocated3);
     AllocVec v3(std::move(v), alloc3);
-    EXPECT_THAT(allocated3, v3.size() * sizeof(int));
+    EXPECT_THAT(allocated3, Eq(static_cast<int64_t>(v3.size() * sizeof(int))));
   }
   EXPECT_EQ(allocated, 0);
   {
     // Test shrink_to_fit deallocations.
     AllocVec v(8, 2, alloc);
-    EXPECT_EQ(allocated, 8 * sizeof(int));
+    EXPECT_EQ(allocated, static_cast<int64_t>(8 * sizeof(int)));
     v.resize(5);
-    EXPECT_EQ(allocated, 8 * sizeof(int));
+    EXPECT_EQ(allocated, static_cast<int64_t>(8 * sizeof(int)));
     v.shrink_to_fit();
-    EXPECT_EQ(allocated, 5 * sizeof(int));
+    EXPECT_EQ(allocated, static_cast<int64_t>(5 * sizeof(int)));
     v.resize(4);
-    EXPECT_EQ(allocated, 5 * sizeof(int));
+    EXPECT_EQ(allocated, static_cast<int64_t>(5 * sizeof(int)));
     v.shrink_to_fit();
     EXPECT_EQ(allocated, 0);
   }
@@ -1648,13 +1735,17 @@
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
     AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
     EXPECT_LT(v1.capacity(), v2.capacity());
-    EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
-    EXPECT_THAT(allocated2, v2.capacity() * sizeof(int));
+    EXPECT_THAT(allocated1,
+                Eq(static_cast<int64_t>(v1.capacity() * sizeof(int))));
+    EXPECT_THAT(allocated2,
+                Eq(static_cast<int64_t>(v2.capacity() * sizeof(int))));
     v1.swap(v2);
     EXPECT_THAT(v1, ElementsAreArray(ia2));
     EXPECT_THAT(v2, ElementsAreArray(ia1));
-    EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
-    EXPECT_THAT(allocated2, v1.capacity() * sizeof(int));
+    EXPECT_THAT(allocated1,
+                Eq(static_cast<int64_t>(v2.capacity() * sizeof(int))));
+    EXPECT_THAT(allocated2,
+                Eq(static_cast<int64_t>(v1.capacity() * sizeof(int))));
   }
   EXPECT_THAT(allocated1, 0);
   EXPECT_THAT(allocated2, 0);
@@ -1672,13 +1763,15 @@
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
     AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
-    EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
-    EXPECT_THAT(allocated2, 0);
+    EXPECT_THAT(allocated1,
+                Eq(static_cast<int64_t>(v1.capacity() * sizeof(int))));
+    EXPECT_THAT(allocated2, Eq(0));
     v1.swap(v2);
     EXPECT_THAT(v1, ElementsAreArray(ia2));
     EXPECT_THAT(v2, ElementsAreArray(ia1));
-    EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
-    EXPECT_THAT(allocated2, 0);
+    EXPECT_THAT(allocated1,
+                Eq(static_cast<int64_t>(v2.capacity() * sizeof(int))));
+    EXPECT_THAT(allocated2, Eq(0));
     EXPECT_TRUE(v2.get_allocator() == a1);
     EXPECT_TRUE(v1.get_allocator() == a2);
   }
@@ -1740,7 +1833,7 @@
 }
 
 TEST(AllocatorSupportTest, SizeAllocConstructor) {
-  constexpr int inlined_size = 4;
+  constexpr size_t inlined_size = 4;
   using Alloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, inlined_size, Alloc>;
 
@@ -1750,7 +1843,7 @@
     auto v = AllocVec(len, Alloc(&allocated));
 
     // Inline storage used; allocator should not be invoked
-    EXPECT_THAT(allocated, 0);
+    EXPECT_THAT(allocated, Eq(0));
     EXPECT_THAT(v, AllOf(SizeIs(len), Each(0)));
   }
 
@@ -1760,7 +1853,7 @@
     auto v = AllocVec(len, Alloc(&allocated));
 
     // Out of line storage used; allocation of 8 elements expected
-    EXPECT_THAT(allocated, len * sizeof(int));
+    EXPECT_THAT(allocated, Eq(static_cast<int64_t>(len * sizeof(int))));
     EXPECT_THAT(v, AllOf(SizeIs(len), Each(0)));
   }
 }
@@ -1795,9 +1888,9 @@
 
   // Generate a variety of vectors; some of these are small enough for the
   // inline space but are stored out of line.
-  for (int i = 0; i < 10; ++i) {
+  for (size_t i = 0; i < 10; ++i) {
     V v;
-    for (int j = 0; j < i; ++j) {
+    for (int j = 0; j < static_cast<int>(i); ++j) {
       v.push_back(j);
     }
     cases.push_back(v);
@@ -1808,4 +1901,226 @@
   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
 }
 
+class MoveConstructibleOnlyInstance
+    : public absl::test_internal::BaseCountedInstance {
+ public:
+  explicit MoveConstructibleOnlyInstance(int x) : BaseCountedInstance(x) {}
+  MoveConstructibleOnlyInstance(MoveConstructibleOnlyInstance&& other) =
+      default;
+  MoveConstructibleOnlyInstance& operator=(
+      MoveConstructibleOnlyInstance&& other) = delete;
+};
+
+MATCHER(HasValue, "") {
+  return ::testing::get<0>(arg).value() == ::testing::get<1>(arg);
+}
+
+TEST(NonAssignableMoveAssignmentTest, AllocatedToInline) {
+  using X = MoveConstructibleOnlyInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> inlined;
+  inlined.emplace_back(1);
+  absl::InlinedVector<X, 2> allocated;
+  allocated.emplace_back(1);
+  allocated.emplace_back(2);
+  allocated.emplace_back(3);
+  tracker.ResetCopiesMovesSwaps();
+
+  inlined = std::move(allocated);
+  // Passed ownership of the allocated storage.
+  EXPECT_EQ(tracker.moves(), 0);
+  EXPECT_EQ(tracker.live_instances(), 3);
+
+  EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+TEST(NonAssignableMoveAssignmentTest, InlineToAllocated) {
+  using X = MoveConstructibleOnlyInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> inlined;
+  inlined.emplace_back(1);
+  absl::InlinedVector<X, 2> allocated;
+  allocated.emplace_back(1);
+  allocated.emplace_back(2);
+  allocated.emplace_back(3);
+  tracker.ResetCopiesMovesSwaps();
+
+  allocated = std::move(inlined);
+  // Moved elements
+  EXPECT_EQ(tracker.moves(), 1);
+  EXPECT_EQ(tracker.live_instances(), 1);
+
+  EXPECT_THAT(allocated, Pointwise(HasValue(), {1}));
+}
+
+TEST(NonAssignableMoveAssignmentTest, InlineToInline) {
+  using X = MoveConstructibleOnlyInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> inlined_a;
+  inlined_a.emplace_back(1);
+  absl::InlinedVector<X, 2> inlined_b;
+  inlined_b.emplace_back(1);
+  tracker.ResetCopiesMovesSwaps();
+
+  inlined_a = std::move(inlined_b);
+  // Moved elements
+  EXPECT_EQ(tracker.moves(), 1);
+  EXPECT_EQ(tracker.live_instances(), 1);
+
+  EXPECT_THAT(inlined_a, Pointwise(HasValue(), {1}));
+}
+
+TEST(NonAssignableMoveAssignmentTest, AllocatedToAllocated) {
+  using X = MoveConstructibleOnlyInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> allocated_a;
+  allocated_a.emplace_back(1);
+  allocated_a.emplace_back(2);
+  allocated_a.emplace_back(3);
+  absl::InlinedVector<X, 2> allocated_b;
+  allocated_b.emplace_back(4);
+  allocated_b.emplace_back(5);
+  allocated_b.emplace_back(6);
+  allocated_b.emplace_back(7);
+  tracker.ResetCopiesMovesSwaps();
+
+  allocated_a = std::move(allocated_b);
+  // Passed ownership of the allocated storage.
+  EXPECT_EQ(tracker.moves(), 0);
+  EXPECT_EQ(tracker.live_instances(), 4);
+
+  EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
+}
+
+TEST(NonAssignableMoveAssignmentTest, AssignThis) {
+  using X = MoveConstructibleOnlyInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> v;
+  v.emplace_back(1);
+  v.emplace_back(2);
+  v.emplace_back(3);
+
+  tracker.ResetCopiesMovesSwaps();
+
+  // Obfuscated in order to pass -Wself-move.
+  v = std::move(*std::addressof(v));
+  // Nothing happens: self-move-assignment is a no-op.
+  EXPECT_EQ(tracker.moves(), 0);
+  EXPECT_EQ(tracker.live_instances(), 3);
+
+  EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+class NonSwappableInstance : public absl::test_internal::BaseCountedInstance {
+ public:
+  explicit NonSwappableInstance(int x) : BaseCountedInstance(x) {}
+  NonSwappableInstance(const NonSwappableInstance& other) = default;
+  NonSwappableInstance& operator=(const NonSwappableInstance& other) = default;
+  NonSwappableInstance(NonSwappableInstance&& other) = default;
+  NonSwappableInstance& operator=(NonSwappableInstance&& other) = default;
+};
+
+void swap(NonSwappableInstance&, NonSwappableInstance&) = delete;
+
+TEST(NonSwappableSwapTest, InlineAndAllocatedTransferStorageAndMove) {
+  using X = NonSwappableInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> inlined;
+  inlined.emplace_back(1);
+  absl::InlinedVector<X, 2> allocated;
+  allocated.emplace_back(1);
+  allocated.emplace_back(2);
+  allocated.emplace_back(3);
+  tracker.ResetCopiesMovesSwaps();
+
+  inlined.swap(allocated);
+  EXPECT_EQ(tracker.moves(), 1);
+  EXPECT_EQ(tracker.live_instances(), 4);
+
+  EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+TEST(NonSwappableSwapTest, InlineAndInlineMoveIndividualElements) {
+  using X = NonSwappableInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> inlined_a;
+  inlined_a.emplace_back(1);
+  absl::InlinedVector<X, 2> inlined_b;
+  inlined_b.emplace_back(2);
+  tracker.ResetCopiesMovesSwaps();
+
+  inlined_a.swap(inlined_b);
+  EXPECT_EQ(tracker.moves(), 3);
+  EXPECT_EQ(tracker.live_instances(), 2);
+
+  EXPECT_THAT(inlined_a, Pointwise(HasValue(), {2}));
+  EXPECT_THAT(inlined_b, Pointwise(HasValue(), {1}));
+}
+
+TEST(NonSwappableSwapTest, AllocatedAndAllocatedOnlyTransferStorage) {
+  using X = NonSwappableInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> allocated_a;
+  allocated_a.emplace_back(1);
+  allocated_a.emplace_back(2);
+  allocated_a.emplace_back(3);
+  absl::InlinedVector<X, 2> allocated_b;
+  allocated_b.emplace_back(4);
+  allocated_b.emplace_back(5);
+  allocated_b.emplace_back(6);
+  allocated_b.emplace_back(7);
+  tracker.ResetCopiesMovesSwaps();
+
+  allocated_a.swap(allocated_b);
+  EXPECT_EQ(tracker.moves(), 0);
+  EXPECT_EQ(tracker.live_instances(), 7);
+
+  EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
+  EXPECT_THAT(allocated_b, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+TEST(NonSwappableSwapTest, SwapThis) {
+  using X = NonSwappableInstance;
+  InstanceTracker tracker;
+  absl::InlinedVector<X, 2> v;
+  v.emplace_back(1);
+  v.emplace_back(2);
+  v.emplace_back(3);
+
+  tracker.ResetCopiesMovesSwaps();
+
+  v.swap(v);
+  EXPECT_EQ(tracker.moves(), 0);
+  EXPECT_EQ(tracker.live_instances(), 3);
+
+  EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+template <size_t N>
+using CharVec = absl::InlinedVector<char, N>;
+
+// Warning: This struct "simulates" the type `InlinedVector::Storage::Allocated`
+// so that we can state reasonable expectations about the inlined storage
+// capacity optimization. If the implementation of `Allocated` changes, then
+// `MySpan` and the tests that use it need to be updated accordingly.
+template <typename T>
+struct MySpan {
+  T* data;
+  size_t size;
+};
+
+TEST(StorageTest, InlinedCapacityAutoIncrease) {
+  // The requested capacity is auto-increased to `sizeof(MySpan<char>)`.
+  EXPECT_GT(CharVec<1>().capacity(), 1);
+  EXPECT_EQ(CharVec<1>().capacity(), sizeof(MySpan<char>));
+  EXPECT_EQ(CharVec<1>().capacity(), CharVec<2>().capacity());
+  EXPECT_EQ(sizeof(CharVec<1>), sizeof(CharVec<2>));
+
+  // The requested capacity is auto-increased to
+  // `sizeof(MySpan<int>) / sizeof(int)`.
+  EXPECT_GT((absl::InlinedVector<int, 1>().capacity()), 1);
+  EXPECT_EQ((absl::InlinedVector<int, 1>().capacity()),
+            sizeof(MySpan<int>) / sizeof(int));
+}
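// Editor's note: an illustrative sketch (not part of this patch) of what the
// capacity auto-increase verified above means for user code, assuming the heap
// representation stores exactly a pointer and a size, as `MySpan` simulates:
//
//   absl::InlinedVector<char, 1> v;
//   // The effective inline capacity is raised to the size of the heap
//   // representation, so requesting 1 or 2 inline chars costs the same.
//   assert(v.capacity() >= sizeof(char *) + sizeof(size_t));
//   static_assert(sizeof(absl::InlinedVector<char, 1>) ==
//                     sizeof(absl::InlinedVector<char, 2>),
//                 "identical footprint");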
+
 }  // anonymous namespace
diff --git a/abseil-cpp/absl/container/internal/btree.h b/abseil-cpp/absl/container/internal/btree.h
index 002ccc1..569faa0 100644
--- a/abseil-cpp/absl/container/internal/btree.h
+++ b/abseil-cpp/absl/container/internal/btree.h
@@ -58,8 +58,10 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/internal/common.h"
+#include "absl/container/internal/common_policy_traits.h"
 #include "absl/container/internal/compressed_tuple.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/layout.h"
@@ -74,12 +76,30 @@
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the nodes and
+// iterators. When iterators are used, we validate that the container has not
+// been mutated since the iterator was constructed.
+#define ABSL_BTREE_ENABLE_GENERATIONS
+#endif
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+constexpr bool BtreeGenerationsEnabled() { return true; }
+#else
+constexpr bool BtreeGenerationsEnabled() { return false; }
+#endif
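// Editor's note: an illustrative sketch (not part of this patch) of the class
// of bug these generation counters catch in sanitizer builds -- continuing to
// use an iterator after the tree has been mutated:
//
//   absl::btree_set<int> s = {1, 2, 3};
//   auto it = s.begin();
//   s.insert(4);   // May rebalance nodes; `it` is invalidated.
//   int x = *it;   // With generations enabled, iterator validation asserts
//                  // here instead of silently reading through a stale pointer.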
+
+template <typename Compare, typename T, typename U>
+using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
+
 // A helper class that indicates if the Compare parameter is a key-compare-to
 // comparator.
 template <typename Compare, typename T>
 using btree_is_key_compare_to =
-    std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
-                        absl::weak_ordering>;
+    std::is_convertible<compare_result_t<Compare, T, T>, absl::weak_ordering>;
 
 struct StringBtreeDefaultLess {
   using is_transparent = void;
@@ -87,8 +107,13 @@
   StringBtreeDefaultLess() = default;
 
   // Compatibility constructor.
-  StringBtreeDefaultLess(std::less<std::string>) {}  // NOLINT
-  StringBtreeDefaultLess(std::less<string_view>) {}  // NOLINT
+  StringBtreeDefaultLess(std::less<std::string>) {}        // NOLINT
+  StringBtreeDefaultLess(std::less<absl::string_view>) {}  // NOLINT
+
+  // Allow converting to std::less for use in key_comp()/value_comp().
+  explicit operator std::less<std::string>() const { return {}; }
+  explicit operator std::less<absl::string_view>() const { return {}; }
+  explicit operator std::less<absl::Cord>() const { return {}; }
 
   absl::weak_ordering operator()(absl::string_view lhs,
                                  absl::string_view rhs) const {
@@ -114,8 +139,13 @@
 
   StringBtreeDefaultGreater() = default;
 
-  StringBtreeDefaultGreater(std::greater<std::string>) {}  // NOLINT
-  StringBtreeDefaultGreater(std::greater<string_view>) {}  // NOLINT
+  StringBtreeDefaultGreater(std::greater<std::string>) {}        // NOLINT
+  StringBtreeDefaultGreater(std::greater<absl::string_view>) {}  // NOLINT
+
+  // Allow converting to std::greater for use in key_comp()/value_comp().
+  explicit operator std::greater<std::string>() const { return {}; }
+  explicit operator std::greater<absl::string_view>() const { return {}; }
+  explicit operator std::greater<absl::Cord>() const { return {}; }
 
   absl::weak_ordering operator()(absl::string_view lhs,
                                  absl::string_view rhs) const {
@@ -136,73 +166,234 @@
   }
 };
 
-// A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns an `absl::weak_ordering`. This helper
-// class is specialized for less<std::string>, greater<std::string>,
-// less<string_view>, greater<string_view>, less<absl::Cord>, and
-// greater<absl::Cord>.
-//
-// key_compare_to_adapter is provided so that btree users
-// automatically get the more efficient compare-to code when using common
-// Abseil string types with common comparison functors.
-// These string-like specializations also turn on heterogeneous lookup by
-// default.
+// See the comments on checked_compare below.
+template <typename Compare, bool is_class = std::is_class<Compare>::value>
+struct checked_compare_base : Compare {
+  using Compare::Compare;
+  explicit checked_compare_base(Compare c) : Compare(std::move(c)) {}
+  const Compare &comp() const { return *this; }
+};
 template <typename Compare>
-struct key_compare_to_adapter {
-  using type = Compare;
+struct checked_compare_base<Compare, false> {
+  explicit checked_compare_base(Compare c) : compare(std::move(c)) {}
+  const Compare &comp() const { return compare; }
+  Compare compare;
+};
+
+// A mechanism for opting out of checked_compare for use only in btree_test.cc.
+struct BtreeTestOnlyCheckedCompareOptOutBase {};
+
+// A helper class to adapt the specified comparator for two use cases:
+// (1) When using common Abseil string types with common comparison functors,
+// convert a boolean comparison into a three-way comparison that returns an
+// `absl::weak_ordering`. This helper class is specialized for
+// less<std::string>, greater<std::string>, less<string_view>,
+// greater<string_view>, less<absl::Cord>, and greater<absl::Cord>.
+// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see
+// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever
+// a comparison is made, we will make assertions to verify that the comparator
+// is valid.
+template <typename Compare, typename Key>
+struct key_compare_adapter {
+  // Inherit from checked_compare_base to support function pointers and also
+  // keep empty-base-optimization (EBO) support for classes.
+  // Note: we can't use CompressedTuple here because that would interfere
+  // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a
+  // CompressedTuple and nested `CompressedTuple`s don't support EBO.
+  // TODO(b/214288561): use CompressedTuple instead once it supports EBO for
+  // nested `CompressedTuple`s.
+  struct checked_compare : checked_compare_base<Compare> {
+   private:
+    using Base = typename checked_compare::checked_compare_base;
+    using Base::comp;
+
+    // If possible, returns whether `t` is equivalent to itself. We can only do
+    // this for `Key`s because we can't be sure that it's safe to call
+    // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a
+    // compilation failure inside the implementation of the comparison operator.
+    bool is_self_equivalent(const Key &k) const {
+      // Note: this works for both boolean and three-way comparators.
+      return comp()(k, k) == 0;
+    }
+    // If we can't compare `t` with itself, returns true unconditionally.
+    template <typename T>
+    bool is_self_equivalent(const T &) const {
+      return true;
+    }
+
+   public:
+    using Base::Base;
+    checked_compare(Compare comp) : Base(std::move(comp)) {}  // NOLINT
+
+    // Allow converting to Compare for use in key_comp()/value_comp().
+    explicit operator Compare() const { return comp(); }
+
+    template <typename T, typename U,
+              absl::enable_if_t<
+                  std::is_same<bool, compare_result_t<Compare, T, U>>::value,
+                  int> = 0>
+    bool operator()(const T &lhs, const U &rhs) const {
+      // NOTE: if any of these assertions fail, then the comparator does not
+      // establish a strict-weak-ordering (see
+      // https://en.cppreference.com/w/cpp/named_req/Compare).
+      assert(is_self_equivalent(lhs));
+      assert(is_self_equivalent(rhs));
+      const bool lhs_comp_rhs = comp()(lhs, rhs);
+      assert(!lhs_comp_rhs || !comp()(rhs, lhs));
+      return lhs_comp_rhs;
+    }
+
+    template <
+        typename T, typename U,
+        absl::enable_if_t<std::is_convertible<compare_result_t<Compare, T, U>,
+                                              absl::weak_ordering>::value,
+                          int> = 0>
+    absl::weak_ordering operator()(const T &lhs, const U &rhs) const {
+      // NOTE: if any of these assertions fail, then the comparator does not
+      // establish a strict-weak-ordering (see
+      // https://en.cppreference.com/w/cpp/named_req/Compare).
+      assert(is_self_equivalent(lhs));
+      assert(is_self_equivalent(rhs));
+      const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs);
+#ifndef NDEBUG
+      const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs);
+      if (lhs_comp_rhs > 0) {
+        assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0");
+      } else if (lhs_comp_rhs == 0) {
+        assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0");
+      } else {
+        assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+      }
+#endif
+      return lhs_comp_rhs;
+    }
+  };
+  using type = absl::conditional_t<
+      std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase, Compare>::value,
+      Compare, checked_compare>;
 };
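// Editor's note: an illustrative sketch (not part of this patch) of a
// comparator that the checked_compare adapter above would flag in debug builds
// because it is not a strict weak ordering (with `<=`, a key compares "less
// than" itself):
//
//   struct BadLess {
//     bool operator()(int a, int b) const { return a <= b; }
//   };
//   absl::btree_set<int, BadLess> s;
//   s.insert(1);
//   s.insert(2);  // Once two keys are compared, the self-equivalence assertion
//                 // fires: a valid comparator must not report k < k.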
 
 template <>
-struct key_compare_to_adapter<std::less<std::string>> {
+struct key_compare_adapter<std::less<std::string>, std::string> {
   using type = StringBtreeDefaultLess;
 };
 
 template <>
-struct key_compare_to_adapter<std::greater<std::string>> {
+struct key_compare_adapter<std::greater<std::string>, std::string> {
   using type = StringBtreeDefaultGreater;
 };
 
 template <>
-struct key_compare_to_adapter<std::less<absl::string_view>> {
+struct key_compare_adapter<std::less<absl::string_view>, absl::string_view> {
   using type = StringBtreeDefaultLess;
 };
 
 template <>
-struct key_compare_to_adapter<std::greater<absl::string_view>> {
+struct key_compare_adapter<std::greater<absl::string_view>, absl::string_view> {
   using type = StringBtreeDefaultGreater;
 };
 
 template <>
-struct key_compare_to_adapter<std::less<absl::Cord>> {
+struct key_compare_adapter<std::less<absl::Cord>, absl::Cord> {
   using type = StringBtreeDefaultLess;
 };
 
 template <>
-struct key_compare_to_adapter<std::greater<absl::Cord>> {
+struct key_compare_adapter<std::greater<absl::Cord>, absl::Cord> {
   using type = StringBtreeDefaultGreater;
 };
 
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+//  This would be useful, for example, for key types that wrap an integer
+//  and define their own cheap operator<(). For example:
+//
+//   class K {
+//    public:
+//     using absl_btree_prefer_linear_node_search = std::true_type;
+//     ...
+//    private:
+//     friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+//     int k_;
+//   };
+//
+//   btree_map<K, V> m;  // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : T::absl_btree_prefer_linear_node_search {};
+
+template <typename Compare, typename Key>
+constexpr bool compare_has_valid_result_type() {
+  using compare_result_type = compare_result_t<Compare, Key, Key>;
+  return std::is_same<compare_result_type, bool>::value ||
+         std::is_convertible<compare_result_type, absl::weak_ordering>::value;
+}
+
+template <typename original_key_compare, typename value_type>
+class map_value_compare {
+  template <typename Params>
+  friend class btree;
+
+  // Note: this `protected` is part of the API of std::map::value_compare. See
+  // https://en.cppreference.com/w/cpp/container/map/value_compare.
+ protected:
+  explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+  original_key_compare comp;  // NOLINT
+
+ public:
+  auto operator()(const value_type &lhs, const value_type &rhs) const
+      -> decltype(comp(lhs.first, rhs.first)) {
+    return comp(lhs.first, rhs.first);
+  }
+};
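// Editor's note: an illustrative sketch (not part of this patch) of how this
// adapter surfaces to users through value_comp(): value_type pairs are ordered
// by their keys only.
//
//   absl::btree_map<int, std::string> m;
//   auto vc = m.value_comp();
//   bool b = vc({1, "a"}, {2, "b"});  // true: compares 1 < 2, ignores "a"/"b".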
+
 template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
-          bool Multi, typename SlotPolicy>
-struct common_params {
+          bool IsMulti, bool IsMap, typename SlotPolicy>
+struct common_params : common_policy_traits<SlotPolicy> {
+  using original_key_compare = Compare;
+
   // If Compare is a common comparator for a string-like type, then we adapt it
   // to use heterogeneous lookup and to be a key-compare-to comparator.
-  using key_compare = typename key_compare_to_adapter<Compare>::type;
-  // True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
-  using is_key_compare_adapted =
-      absl::negation<std::is_same<key_compare, Compare>>;
+  // We also adapt the comparator to diagnose invalid comparators in debug mode.
+  // We disable this when `Compare` is invalid in a way that will cause
+  // adaptation to fail (having invalid return type) so that we can give a
+  // better compilation failure in static_assert_validation. If we don't do
+  // this, then there will be cascading compilation failures that are confusing
+  // for users.
+  using key_compare =
+      absl::conditional_t<!compare_has_valid_result_type<Compare, Key>(),
+                          Compare,
+                          typename key_compare_adapter<Compare, Key>::type>;
+
+  static constexpr bool kIsKeyCompareStringAdapted =
+      std::is_same<key_compare, StringBtreeDefaultLess>::value ||
+      std::is_same<key_compare, StringBtreeDefaultGreater>::value;
+  static constexpr bool kIsKeyCompareTransparent =
+      IsTransparent<original_key_compare>::value || kIsKeyCompareStringAdapted;
+
   // A type which indicates if we have a key-compare-to functor or a plain old
   // key-compare functor.
   using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
 
   using allocator_type = Alloc;
   using key_type = Key;
-  using size_type = std::make_signed<size_t>::type;
+  using size_type = size_t;
   using difference_type = ptrdiff_t;
 
-  // True if this is a multiset or multimap.
-  using is_multi_container = std::integral_constant<bool, Multi>;
-
   using slot_policy = SlotPolicy;
   using slot_type = typename slot_policy::slot_type;
   using value_type = typename slot_policy::value_type;
@@ -212,150 +403,42 @@
   using reference = value_type &;
   using const_reference = const value_type &;
 
+  using value_compare =
+      absl::conditional_t<IsMap,
+                          map_value_compare<original_key_compare, value_type>,
+                          original_key_compare>;
+  using is_map_container = std::integral_constant<bool, IsMap>;
+
+  // For the given lookup key type, returns whether we can have multiple
+  // equivalent keys in the btree. If this is a multi-container, then we can.
+  // Otherwise, we can have multiple equivalent keys only if all of the
+  // following conditions are met:
+  // - The comparator is transparent.
+  // - The lookup key type is not the same as key_type.
+  // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+  //   that we know has the same equivalence classes for all lookup types.
+  template <typename LookupKey>
+  constexpr static bool can_have_multiple_equivalent_keys() {
+    return IsMulti || (IsTransparent<key_compare>::value &&
+                       !std::is_same<LookupKey, Key>::value &&
+                       !kIsKeyCompareStringAdapted);
+  }
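  // Editor's note: an illustrative sketch (not part of this patch) of a lookup
  // that is equivalent to several distinct stored keys: the keys are
  // case-sensitive strings, but a hypothetical transparent comparator also
  // accepts a `CaseInsensitiveView` lookup type that ignores case.
  //
  //   struct CaseInsensitiveView { absl::string_view v; };  // hypothetical
  //   // comp(CaseInsensitiveView{"abc"}, "ABC") == 0 and
  //   // comp(CaseInsensitiveView{"abc"}, "abc") == 0, yet "abc" != "ABC",
  //   // so a non-multi container can hold several keys matching this lookup.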
+
   enum {
     kTargetNodeSize = TargetNodeSize,
 
-    // Upper bound for the available space for values. This is largest for leaf
+    // Upper bound for the available space for slots. This is largest for leaf
     // nodes, which have overhead of at least a pointer + 4 bytes (for storing
     // 3 field_types and an enum).
-    kNodeValueSpace =
-        TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+    kNodeSlotSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
   };
 
-  // This is an integral type large enough to hold as many
-  // ValueSize-values as will fit a node of TargetNodeSize bytes.
+  // This is an integral type large enough to hold as many slots as will fit a
+  // node of TargetNodeSize bytes.
   using node_count_type =
-      absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+      absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) >
                            (std::numeric_limits<uint8_t>::max)()),
                           uint16_t, uint8_t>;  // NOLINT
-
-  // The following methods are necessary for passing this struct as PolicyTraits
-  // for node_handle and/or are used within btree.
-  static value_type &element(slot_type *slot) {
-    return slot_policy::element(slot);
-  }
-  static const value_type &element(const slot_type *slot) {
-    return slot_policy::element(slot);
-  }
-  template <class... Args>
-  static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
-    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
-  }
-  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
-    slot_policy::construct(alloc, slot, other);
-  }
-  static void destroy(Alloc *alloc, slot_type *slot) {
-    slot_policy::destroy(alloc, slot);
-  }
-  static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
-    construct(alloc, new_slot, old_slot);
-    destroy(alloc, old_slot);
-  }
-  static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
-    slot_policy::swap(alloc, a, b);
-  }
-  static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
-    slot_policy::move(alloc, src, dest);
-  }
-};
-
-// A parameters structure for holding the type parameters for a btree_map.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Data, typename Compare, typename Alloc,
-          int TargetNodeSize, bool Multi>
-struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
-                                  map_slot_policy<Key, Data>> {
-  using super_type = typename map_params::common_params;
-  using mapped_type = Data;
-  // This type allows us to move keys when it is safe to do so. It is safe
-  // for maps in which value_type and mutable_value_type are layout compatible.
-  using slot_policy = typename super_type::slot_policy;
-  using slot_type = typename super_type::slot_type;
-  using value_type = typename super_type::value_type;
-  using init_type = typename super_type::init_type;
-
-  using key_compare = typename super_type::key_compare;
-  // Inherit from key_compare for empty base class optimization.
-  struct value_compare : private key_compare {
-    value_compare() = default;
-    explicit value_compare(const key_compare &cmp) : key_compare(cmp) {}
-
-    template <typename T, typename U>
-    auto operator()(const T &left, const U &right) const
-        -> decltype(std::declval<key_compare>()(left.first, right.first)) {
-      return key_compare::operator()(left.first, right.first);
-    }
-  };
-  using is_map_container = std::true_type;
-
-  template <typename V>
-  static auto key(const V &value) -> decltype(value.first) {
-    return value.first;
-  }
-  static const Key &key(const slot_type *s) { return slot_policy::key(s); }
-  static const Key &key(slot_type *s) { return slot_policy::key(s); }
-  // For use in node handle.
-  static auto mutable_key(slot_type *s)
-      -> decltype(slot_policy::mutable_key(s)) {
-    return slot_policy::mutable_key(s);
-  }
-  static mapped_type &value(value_type *value) { return value->second; }
-};
-
-// This type implements the necessary functions from the
-// absl::container_internal::slot_type interface.
-template <typename Key>
-struct set_slot_policy {
-  using slot_type = Key;
-  using value_type = Key;
-  using mutable_value_type = Key;
-
-  static value_type &element(slot_type *slot) { return *slot; }
-  static const value_type &element(const slot_type *slot) { return *slot; }
-
-  template <typename Alloc, class... Args>
-  static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
-    absl::allocator_traits<Alloc>::construct(*alloc, slot,
-                                             std::forward<Args>(args)...);
-  }
-
-  template <typename Alloc>
-  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
-    absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
-  }
-
-  template <typename Alloc>
-  static void destroy(Alloc *alloc, slot_type *slot) {
-    absl::allocator_traits<Alloc>::destroy(*alloc, slot);
-  }
-
-  template <typename Alloc>
-  static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
-    using std::swap;
-    swap(*a, *b);
-  }
-
-  template <typename Alloc>
-  static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
-    *dest = std::move(*src);
-  }
-};
-
-// A parameters structure for holding the type parameters for a btree_set.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
-          bool Multi>
-struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
-                                  set_slot_policy<Key>> {
-  using value_type = Key;
-  using slot_type = typename set_params::common_params::slot_type;
-  using value_compare = typename set_params::common_params::key_compare;
-  using is_map_container = std::false_type;
-
-  template <typename V>
-  static const V &key(const V &value) { return value; }
-  static const Key &key(const slot_type *slot) { return *slot; }
-  static const Key &key(slot_type *slot) { return *slot; }
 };
 
 // An adapter class that converts a lower-bound compare into an upper-bound
@@ -391,6 +474,10 @@
 // useful information.
 template <typename V>
 struct SearchResult<V, false> {
+  SearchResult() {}
+  explicit SearchResult(V v) : value(v) {}
+  SearchResult(V v, MatchKind /*match*/) : value(v) {}
+
   V value;
 
   static constexpr bool HasMatch() { return false; }
@@ -403,10 +490,10 @@
 template <typename Params>
 class btree_node {
   using is_key_compare_to = typename Params::is_key_compare_to;
-  using is_multi_container = typename Params::is_multi_container;
   using field_type = typename Params::node_count_type;
   using allocator_type = typename Params::allocator_type;
   using slot_type = typename Params::slot_type;
+  using original_key_compare = typename Params::original_key_compare;
 
  public:
   using params_type = Params;
@@ -421,21 +508,35 @@
   using difference_type = typename Params::difference_type;
 
   // Btree decides whether to use linear node search as follows:
+  //   - If the comparator expresses a preference, use that.
+  //   - If the key expresses a preference, use that.
   //   - If the key is arithmetic and the comparator is std::less or
   //     std::greater, choose linear.
   //   - Otherwise, choose binary.
   // TODO(ezb): Might make sense to add condition(s) based on node-size.
   using use_linear_search = std::integral_constant<
-      bool,
-                std::is_arithmetic<key_type>::value &&
-                    (std::is_same<std::less<key_type>, key_compare>::value ||
-                     std::is_same<std::greater<key_type>, key_compare>::value)>;
+      bool, has_linear_node_search_preference<original_key_compare>::value
+                ? prefers_linear_node_search<original_key_compare>::value
+            : has_linear_node_search_preference<key_type>::value
+                ? prefers_linear_node_search<key_type>::value
+                : std::is_arithmetic<key_type>::value &&
+                      (std::is_same<std::less<key_type>,
+                                    original_key_compare>::value ||
+                       std::is_same<std::greater<key_type>,
+                                    original_key_compare>::value)>;
 
-  // This class is organized by gtl::Layout as if it had the following
-  // structure:
+  // This class is organized by absl::container_internal::Layout as if it had
+  // the following structure:
   //   // A pointer to the node's parent.
   //   btree_node *parent;
   //
+  //   // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a
+  //   // generation integer in order to check that when iterators are
+  //   // used, they haven't been invalidated already. Only the generation on
+  //   // the root is used, but we have one on each node because whether a node
+  //   // is root or not can change.
+  //   uint32_t generation;
+  //
   //   // The position of the node in the node's parent.
   //   field_type position;
   //   // The index of the first populated value in `values`.
@@ -446,23 +547,23 @@
   //   // is the same as the count of values.
   //   field_type finish;
   //   // The maximum number of values the node can hold. This is an integer in
-  //   // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf
+  //   // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
   //   // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
-  //   // nodes (even though there are still kNodeValues values in the node).
+  //   // nodes (even though there are still kNodeSlots values in the node).
   //   // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
   //   // to free extra bits for is_root, etc.
   //   field_type max_count;
   //
   //   // The array of values. The capacity is `max_count` for leaf nodes and
-  //   // kNodeValues for internal nodes. Only the values in
+  //   // kNodeSlots for internal nodes. Only the values in
   //   // [start, finish) have been initialized and are valid.
   //   slot_type values[max_count];
   //
   //   // The array of child pointers. The keys in children[i] are all less
   //   // than key(i). The keys in children[i + 1] are all greater than key(i).
-  //   // There are 0 children for leaf nodes and kNodeValues + 1 children for
+  //   // There are 0 children for leaf nodes and kNodeSlots + 1 children for
   //   // internal nodes.
-  //   btree_node *children[kNodeValues + 1];
+  //   btree_node *children[kNodeSlots + 1];
   //
   // This class is only constructed by EmptyNodeType. Normally, pointers to the
   // layout above are allocated, cast to btree_node*, and de-allocated within
@@ -482,59 +583,71 @@
   btree_node() = default;
 
  private:
-  using layout_type = absl::container_internal::Layout<btree_node *, field_type,
-                                                       slot_type, btree_node *>;
-  constexpr static size_type SizeWithNValues(size_type n) {
-    return layout_type(/*parent*/ 1,
-                       /*position, start, finish, max_count*/ 4,
-                       /*values*/ n,
-                       /*children*/ 0)
+  using layout_type =
+      absl::container_internal::Layout<btree_node *, uint32_t, field_type,
+                                       slot_type, btree_node *>;
+  constexpr static size_type SizeWithNSlots(size_type n) {
+    return layout_type(
+               /*parent*/ 1,
+               /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
+               /*position, start, finish, max_count*/ 4,
+               /*slots*/ n,
+               /*children*/ 0)
         .AllocSize();
   }
-  // A lower bound for the overhead of fields other than values in a leaf node.
+  // A lower bound for the overhead of fields other than slots in a leaf node.
   constexpr static size_type MinimumOverhead() {
-    return SizeWithNValues(1) - sizeof(value_type);
+    return SizeWithNSlots(1) - sizeof(slot_type);
   }
 
   // Compute how many values we can fit onto a leaf node taking into account
   // padding.
-  constexpr static size_type NodeTargetValues(const int begin, const int end) {
+  constexpr static size_type NodeTargetSlots(const size_type begin,
+                                             const size_type end) {
     return begin == end ? begin
-                        : SizeWithNValues((begin + end) / 2 + 1) >
-                                  params_type::kTargetNodeSize
-                              ? NodeTargetValues(begin, (begin + end) / 2)
-                              : NodeTargetValues((begin + end) / 2 + 1, end);
+           : SizeWithNSlots((begin + end) / 2 + 1) >
+                   params_type::kTargetNodeSize
+               ? NodeTargetSlots(begin, (begin + end) / 2)
+               : NodeTargetSlots((begin + end) / 2 + 1, end);
   }
 
-  enum {
-    kTargetNodeSize = params_type::kTargetNodeSize,
-    kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize),
+  constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize;
+  constexpr static size_type kNodeTargetSlots =
+      NodeTargetSlots(0, kTargetNodeSize);
 
-    // We need a minimum of 3 values per internal node in order to perform
-    // splitting (1 value for the two nodes involved in the split and 1 value
-    // propagated to the parent as the delimiter for the split).
-    kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3,
+  // We need a minimum of 3 slots per internal node in order to perform
+  // splitting (1 value for each of the two nodes resulting from the split and
+  // 1 value propagated to the parent as the delimiter for the split). For
+  // performance reasons, we don't allow 3 slots-per-node due to bad worst-case
+  // occupancy of 1/3 (for a node, not a b-tree).
+  constexpr static size_type kMinNodeSlots = 4;
 
-    // The node is internal (i.e. is not a leaf node) if and only if `max_count`
-    // has this value.
-    kInternalNodeMaxCount = 0,
-  };
+  constexpr static size_type kNodeSlots =
+      kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
 
-  // Leaves can have less than kNodeValues values.
-  constexpr static layout_type LeafLayout(const int max_values = kNodeValues) {
-    return layout_type(/*parent*/ 1,
-                       /*position, start, finish, max_count*/ 4,
-                       /*values*/ max_values,
-                       /*children*/ 0);
+  // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+  // has this value.
+  constexpr static field_type kInternalNodeMaxCount = 0;
+
+  constexpr static layout_type Layout(const size_type slot_count,
+                                      const size_type child_count) {
+    return layout_type(
+        /*parent*/ 1,
+        /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
+        /*position, start, finish, max_count*/ 4,
+        /*slots*/ slot_count,
+        /*children*/ child_count);
+  }
+  // Leaves can have fewer than kNodeSlots values.
+  constexpr static layout_type LeafLayout(
+      const size_type slot_count = kNodeSlots) {
+    return Layout(slot_count, 0);
   }
   constexpr static layout_type InternalLayout() {
-    return layout_type(/*parent*/ 1,
-                       /*position, start, finish, max_count*/ 4,
-                       /*values*/ kNodeValues,
-                       /*children*/ kNodeValues + 1);
+    return Layout(kNodeSlots, kNodeSlots + 1);
   }
-  constexpr static size_type LeafSize(const int max_values = kNodeValues) {
-    return LeafLayout(max_values).AllocSize();
+  constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
+    return LeafLayout(slot_count).AllocSize();
   }
   constexpr static size_type InternalSize() {
     return InternalLayout().AllocSize();
@@ -545,44 +658,47 @@
   template <size_type N>
   inline typename layout_type::template ElementType<N> *GetField() {
     // We assert that we don't read from values that aren't there.
-    assert(N < 3 || !leaf());
+    assert(N < 4 || is_internal());
     return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
   }
   template <size_type N>
   inline const typename layout_type::template ElementType<N> *GetField() const {
-    assert(N < 3 || !leaf());
+    assert(N < 4 || is_internal());
     return InternalLayout().template Pointer<N>(
         reinterpret_cast<const char *>(this));
   }
   void set_parent(btree_node *p) { *GetField<0>() = p; }
-  field_type &mutable_finish() { return GetField<1>()[2]; }
-  slot_type *slot(int i) { return &GetField<2>()[i]; }
+  field_type &mutable_finish() { return GetField<2>()[2]; }
+  slot_type *slot(size_type i) { return &GetField<3>()[i]; }
   slot_type *start_slot() { return slot(start()); }
   slot_type *finish_slot() { return slot(finish()); }
-  const slot_type *slot(int i) const { return &GetField<2>()[i]; }
-  void set_position(field_type v) { GetField<1>()[0] = v; }
-  void set_start(field_type v) { GetField<1>()[1] = v; }
-  void set_finish(field_type v) { GetField<1>()[2] = v; }
+  const slot_type *slot(size_type i) const { return &GetField<3>()[i]; }
+  void set_position(field_type v) { GetField<2>()[0] = v; }
+  void set_start(field_type v) { GetField<2>()[1] = v; }
+  void set_finish(field_type v) { GetField<2>()[2] = v; }
   // This method is only called by the node init methods.
-  void set_max_count(field_type v) { GetField<1>()[3] = v; }
+  void set_max_count(field_type v) { GetField<2>()[3] = v; }
 
  public:
   // Whether this is a leaf node or not. This value doesn't change after the
   // node is created.
-  bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+  bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; }
+  // Whether this is an internal node or not. This value doesn't change after
+  // the node is created.
+  bool is_internal() const { return !is_leaf(); }
 
   // Getter for the position of this node in its parent.
-  field_type position() const { return GetField<1>()[0]; }
+  field_type position() const { return GetField<2>()[0]; }
 
   // Getter for the offset of the first value in the `values` array.
   field_type start() const {
-    // TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
-    assert(GetField<1>()[1] == 0);
+    // TODO(ezb): when floating storage is implemented, return GetField<2>()[1];
+    assert(GetField<2>()[1] == 0);
     return 0;
   }
 
   // Getter for the offset after the last value in the `values` array.
-  field_type finish() const { return GetField<1>()[2]; }
+  field_type finish() const { return GetField<2>()[2]; }
 
   // Getters for the number of values stored in this node.
   field_type count() const {
@@ -591,10 +707,10 @@
   }
   field_type max_count() const {
     // Internal nodes have max_count==kInternalNodeMaxCount.
-    // Leaf nodes have max_count in [1, kNodeValues].
-    const field_type max_count = GetField<1>()[3];
+    // Leaf nodes have max_count in [1, kNodeSlots].
+    const field_type max_count = GetField<2>()[3];
     return max_count == field_type{kInternalNodeMaxCount}
-               ? field_type{kNodeValues}
+               ? field_type{kNodeSlots}
                : max_count;
   }
 
@@ -603,58 +719,86 @@
   // Getter for whether the node is the root of the tree. The parent of the
   // root of the tree is the leftmost node in the tree which is guaranteed to
   // be a leaf.
-  bool is_root() const { return parent()->leaf(); }
+  bool is_root() const { return parent()->is_leaf(); }
   void make_root() {
     assert(parent()->is_root());
+    set_generation(parent()->generation());
     set_parent(parent()->parent());
   }
 
+  // Gets the root node's generation integer, which is the one used by the tree.
+  uint32_t *get_root_generation() const {
+    assert(BtreeGenerationsEnabled());
+    const btree_node *curr = this;
+    for (; !curr->is_root(); curr = curr->parent()) continue;
+    return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
+  }
+
+  // Returns the generation for iterator validation.
+  uint32_t generation() const {
+    return BtreeGenerationsEnabled() ? *get_root_generation() : 0;
+  }
+  // Updates generation. Should only be called on a root node or during node
+  // initialization.
+  void set_generation(uint32_t generation) {
+    if (BtreeGenerationsEnabled()) GetField<1>()[0] = generation;
+  }
+  // Updates the generation. We do this whenever the node is mutated.
+  void next_generation() {
+    if (BtreeGenerationsEnabled()) ++*get_root_generation();
+  }
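+  // Illustrative example (not part of the upstream change; absl::btree_set
+  // is the public wrapper declared in btree_set.h), assuming generations are
+  // enabled, as is typical in sanitizer/debug builds:
+  //   absl::btree_set<int> s = {1, 2, 3};
+  //   auto it = s.begin();
+  //   s.erase(2);  // mutations bump the generation via next_generation()
+  //   *it;         // assert_valid_generation() flags the stale iterator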
+
   // Getters for the key/value at position i in the node.
-  const key_type &key(int i) const { return params_type::key(slot(i)); }
-  reference value(int i) { return params_type::element(slot(i)); }
-  const_reference value(int i) const { return params_type::element(slot(i)); }
+  const key_type &key(size_type i) const { return params_type::key(slot(i)); }
+  reference value(size_type i) { return params_type::element(slot(i)); }
+  const_reference value(size_type i) const {
+    return params_type::element(slot(i));
+  }
 
   // Getters/setter for the child at position i in the node.
-  btree_node *child(int i) const { return GetField<3>()[i]; }
+  btree_node *child(field_type i) const { return GetField<4>()[i]; }
   btree_node *start_child() const { return child(start()); }
-  btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
-  void clear_child(int i) {
+  btree_node *&mutable_child(field_type i) { return GetField<4>()[i]; }
+  void clear_child(field_type i) {
     absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
   }
-  void set_child(int i, btree_node *c) {
+  void set_child_noupdate_position(field_type i, btree_node *c) {
     absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
     mutable_child(i) = c;
+  }
+  void set_child(field_type i, btree_node *c) {
+    set_child_noupdate_position(i, c);
     c->set_position(i);
   }
-  void init_child(int i, btree_node *c) {
+  void init_child(field_type i, btree_node *c) {
     set_child(i, c);
     c->set_parent(this);
   }
 
   // Returns the position of the first value whose key is not less than k.
   template <typename K>
-  SearchResult<int, is_key_compare_to::value> lower_bound(
+  SearchResult<size_type, is_key_compare_to::value> lower_bound(
       const K &k, const key_compare &comp) const {
     return use_linear_search::value ? linear_search(k, comp)
                                     : binary_search(k, comp);
   }
   // Returns the position of the first value whose key is greater than k.
   template <typename K>
-  int upper_bound(const K &k, const key_compare &comp) const {
+  size_type upper_bound(const K &k, const key_compare &comp) const {
     auto upper_compare = upper_bound_adapter<key_compare>(comp);
     return use_linear_search::value ? linear_search(k, upper_compare).value
                                     : binary_search(k, upper_compare).value;
   }
 
   template <typename K, typename Compare>
-  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
   linear_search(const K &k, const Compare &comp) const {
     return linear_search_impl(k, start(), finish(), comp,
                               btree_is_key_compare_to<Compare, key_type>());
   }
 
   template <typename K, typename Compare>
-  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
   binary_search(const K &k, const Compare &comp) const {
     return binary_search_impl(k, start(), finish(), comp,
                               btree_is_key_compare_to<Compare, key_type>());
@@ -663,8 +807,8 @@
   // Returns the position of the first value whose key is not less than k using
   // linear search performed using plain compare.
   template <typename K, typename Compare>
-  SearchResult<int, false> linear_search_impl(
-      const K &k, int s, const int e, const Compare &comp,
+  SearchResult<size_type, false> linear_search_impl(
+      const K &k, size_type s, const size_type e, const Compare &comp,
       std::false_type /* IsCompareTo */) const {
     while (s < e) {
       if (!comp(key(s), k)) {
@@ -672,14 +816,14 @@
       }
       ++s;
     }
-    return {s};
+    return SearchResult<size_type, false>{s};
   }
 
   // Returns the position of the first value whose key is not less than k using
   // linear search performed using compare-to.
   template <typename K, typename Compare>
-  SearchResult<int, true> linear_search_impl(
-      const K &k, int s, const int e, const Compare &comp,
+  SearchResult<size_type, true> linear_search_impl(
+      const K &k, size_type s, const size_type e, const Compare &comp,
       std::true_type /* IsCompareTo */) const {
     while (s < e) {
       const absl::weak_ordering c = comp(key(s), k);
@@ -696,30 +840,30 @@
   // Returns the position of the first value whose key is not less than k using
   // binary search performed using plain compare.
   template <typename K, typename Compare>
-  SearchResult<int, false> binary_search_impl(
-      const K &k, int s, int e, const Compare &comp,
+  SearchResult<size_type, false> binary_search_impl(
+      const K &k, size_type s, size_type e, const Compare &comp,
       std::false_type /* IsCompareTo */) const {
     while (s != e) {
-      const int mid = (s + e) >> 1;
+      const size_type mid = (s + e) >> 1;
       if (comp(key(mid), k)) {
         s = mid + 1;
       } else {
         e = mid;
       }
     }
-    return {s};
+    return SearchResult<size_type, false>{s};
   }
 
   // Returns the position of the first value whose key is not less than k using
   // binary search performed using compare-to.
   template <typename K, typename CompareTo>
-  SearchResult<int, true> binary_search_impl(
-      const K &k, int s, int e, const CompareTo &comp,
+  SearchResult<size_type, true> binary_search_impl(
+      const K &k, size_type s, size_type e, const CompareTo &comp,
       std::true_type /* IsCompareTo */) const {
-    if (is_multi_container::value) {
+    if (params_type::template can_have_multiple_equivalent_keys<K>()) {
       MatchKind exact_match = MatchKind::kNe;
       while (s != e) {
-        const int mid = (s + e) >> 1;
+        const size_type mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
         if (c < 0) {
           s = mid + 1;
@@ -727,16 +871,16 @@
           e = mid;
           if (c == 0) {
             // Need to return the first value whose key is not less than k,
-            // which requires continuing the binary search if this is a
-            // multi-container.
+            // which requires continuing the binary search if there could be
+            // multiple equivalent keys.
             exact_match = MatchKind::kEq;
           }
         }
       }
       return {s, exact_match};
-    } else {  // Not a multi-container.
+    } else {  // Can't have multiple equivalent keys.
       while (s != e) {
-        const int mid = (s + e) >> 1;
+        const size_type mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
         if (c < 0) {
           s = mid + 1;
@@ -750,10 +894,42 @@
     }
   }
 
+  // Returns whether key i is ordered correctly with respect to the other keys
+  // in the node. The motivation here is to detect comparators that violate
+  // transitivity. Note: we only do comparisons of keys on this node rather than
+  // the whole tree so that this is constant time.
+  template <typename Compare>
+  bool is_ordered_correctly(field_type i, const Compare &comp) const {
+    if (std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase,
+                        Compare>::value ||
+        params_type::kIsKeyCompareStringAdapted) {
+      return true;
+    }
+
+    const auto compare = [&](field_type a, field_type b) {
+      const absl::weak_ordering cmp =
+          compare_internal::do_three_way_comparison(comp, key(a), key(b));
+      return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+    };
+    int cmp = -1;
+    constexpr bool kCanHaveEquivKeys =
+        params_type::template can_have_multiple_equivalent_keys<key_type>();
+    for (field_type j = start(); j < finish(); ++j) {
+      if (j == i) {
+        if (cmp > 0) return false;
+        continue;
+      }
+      int new_cmp = compare(j, i);
+      if (new_cmp < cmp || (!kCanHaveEquivKeys && new_cmp == 0)) return false;
+      cmp = new_cmp;
+    }
+    return true;
+  }
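+  // Illustrative note (not part of the upstream change): this catches cases
+  // such as a comparator that reads external mutable state, or keys mutated
+  // after insertion, where some existing key compares greater than key(i)
+  // while a later key compares less, breaking the non-decreasing pattern
+  // that the loop above requires.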
+
   // Emplaces a value at position i, shifting all existing values and
   // children at positions >= i to the right by 1.
   template <typename... Args>
-  void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+  void emplace_value(field_type i, allocator_type *alloc, Args &&...args);
 
   // Removes the values at positions [i, i + to_erase), shifting all existing
   // values and children after that range to the left by to_erase. Clears all
@@ -761,9 +937,9 @@
   void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
 
   // Rebalances a node with its right sibling.
-  void rebalance_right_to_left(int to_move, btree_node *right,
+  void rebalance_right_to_left(field_type to_move, btree_node *right,
                                allocator_type *alloc);
-  void rebalance_left_to_right(int to_move, btree_node *right,
+  void rebalance_left_to_right(field_type to_move, btree_node *right,
                                allocator_type *alloc);
 
   // Splits a node, moving a portion of the node's values to its right sibling.
@@ -774,50 +950,50 @@
   void merge(btree_node *src, allocator_type *alloc);
 
   // Node allocation/deletion routines.
-  void init_leaf(btree_node *parent, int max_count) {
+  void init_leaf(field_type position, field_type max_count,
+                 btree_node *parent) {
+    set_generation(0);
     set_parent(parent);
-    set_position(0);
+    set_position(position);
     set_start(0);
     set_finish(0);
     set_max_count(max_count);
     absl::container_internal::SanitizerPoisonMemoryRegion(
         start_slot(), max_count * sizeof(slot_type));
   }
-  void init_internal(btree_node *parent) {
-    init_leaf(parent, kNodeValues);
+  void init_internal(field_type position, btree_node *parent) {
+    init_leaf(position, kNodeSlots, parent);
     // Set `max_count` to a sentinel value to indicate that this node is
     // internal.
     set_max_count(kInternalNodeMaxCount);
     absl::container_internal::SanitizerPoisonMemoryRegion(
-        &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
+        &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
   }
 
   static void deallocate(const size_type size, btree_node *node,
                          allocator_type *alloc) {
+    absl::container_internal::SanitizerUnpoisonMemoryRegion(node, size);
     absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
   }
 
   // Deletes a node and all of its children.
   static void clear_and_delete(btree_node *node, allocator_type *alloc);
 
- public:
-  // Exposed only for tests.
-  static bool testonly_uses_linear_node_search() {
-    return use_linear_search::value;
-  }
-
  private:
   template <typename... Args>
-  void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
+  void value_init(const field_type i, allocator_type *alloc, Args &&...args) {
+    next_generation();
     absl::container_internal::SanitizerUnpoisonObject(slot(i));
     params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
   }
   void value_destroy(const field_type i, allocator_type *alloc) {
+    next_generation();
     params_type::destroy(alloc, slot(i));
     absl::container_internal::SanitizerPoisonObject(slot(i));
   }
   void value_destroy_n(const field_type i, const field_type n,
                        allocator_type *alloc) {
+    next_generation();
     for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
       params_type::destroy(alloc, s);
       absl::container_internal::SanitizerPoisonObject(s);
@@ -833,6 +1009,7 @@
   // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
   void transfer(const size_type dest_i, const size_type src_i,
                 btree_node *src_node, allocator_type *alloc) {
+    next_generation();
     transfer(slot(dest_i), src_node->slot(src_i), alloc);
   }
 
@@ -841,6 +1018,7 @@
   void transfer_n(const size_type n, const size_type dest_i,
                   const size_type src_i, btree_node *src_node,
                   allocator_type *alloc) {
+    next_generation();
     for (slot_type *src = src_node->slot(src_i), *end = src + n,
                    *dest = slot(dest_i);
          src != end; ++src, ++dest) {
@@ -853,26 +1031,87 @@
   void transfer_n_backward(const size_type n, const size_type dest_i,
                            const size_type src_i, btree_node *src_node,
                            allocator_type *alloc) {
-    for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
-                   *dest = slot(dest_i + n - 1);
+    next_generation();
+    for (slot_type *src = src_node->slot(src_i + n), *end = src - n,
+                   *dest = slot(dest_i + n);
          src != end; --src, --dest) {
-      transfer(dest, src, alloc);
+      // If we modified the loop index calculations above to avoid the -1s here,
+      // it would result in UB in the computation of `end` (and possibly `src`
+      // as well, if n == 0), since slot() is effectively an array index and it
+      // is UB to compute the address of any out-of-bounds array element except
+      // for one-past-the-end.
+      transfer(dest - 1, src - 1, alloc);
     }
   }
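+  // Illustrative walk-through (not part of the upstream change): with n == 2,
+  // `src` starts at slot(src_i + 2) and `end` is slot(src_i); the loop copies
+  // slot(src_i + 1) into slot(dest_i + 1), then slot(src_i) into slot(dest_i),
+  // and stops once `src` reaches `end`.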
 
   template <typename P>
   friend class btree;
   template <typename N, typename R, typename P>
-  friend struct btree_iterator;
+  friend class btree_iterator;
   friend class BtreeNodePeer;
+  friend struct btree_access;
 };
 
-template <typename Node, typename Reference, typename Pointer>
-struct btree_iterator {
+template <typename Node>
+bool AreNodesFromSameContainer(const Node *node_a, const Node *node_b) {
+  // If either node is null, then give up on checking whether they're from the
+  // same container. (If exactly one is null, then we'll trigger the
+  // default-constructed assert in Equals.)
+  if (node_a == nullptr || node_b == nullptr) return true;
+  while (!node_a->is_root()) node_a = node_a->parent();
+  while (!node_b->is_root()) node_b = node_b->parent();
+  return node_a == node_b;
+}
+
+class btree_iterator_generation_info_enabled {
+ public:
+  explicit btree_iterator_generation_info_enabled(uint32_t g)
+      : generation_(g) {}
+
+  // Updates the generation. For use internally right before we return an
+  // iterator to the user.
+  template <typename Node>
+  void update_generation(const Node *node) {
+    if (node != nullptr) generation_ = node->generation();
+  }
+  uint32_t generation() const { return generation_; }
+
+  template <typename Node>
+  void assert_valid_generation(const Node *node) const {
+    if (node != nullptr && node->generation() != generation_) {
+      ABSL_INTERNAL_LOG(
+          FATAL,
+          "Attempting to use an invalidated iterator. The corresponding b-tree "
+          "container has been mutated since this iterator was constructed.");
+    }
+  }
+
  private:
+  // Used to check that the iterator hasn't been invalidated.
+  uint32_t generation_;
+};
+
+class btree_iterator_generation_info_disabled {
+ public:
+  explicit btree_iterator_generation_info_disabled(uint32_t) {}
+  static void update_generation(const void *) {}
+  static uint32_t generation() { return 0; }
+  static void assert_valid_generation(const void *) {}
+};
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+using btree_iterator_generation_info = btree_iterator_generation_info_enabled;
+#else
+using btree_iterator_generation_info = btree_iterator_generation_info_disabled;
+#endif
+
+template <typename Node, typename Reference, typename Pointer>
+class btree_iterator : private btree_iterator_generation_info {
+  using field_type = typename Node::field_type;
   using key_type = typename Node::key_type;
   using size_type = typename Node::size_type;
   using params_type = typename Node::params_type;
+  using is_map_container = typename params_type::is_map_container;
 
   using node_type = Node;
   using normal_node = typename std::remove_const<Node>::type;
@@ -884,7 +1123,7 @@
   using slot_type = typename params_type::slot_type;
 
   using iterator =
-      btree_iterator<normal_node, normal_reference, normal_pointer>;
+      btree_iterator<normal_node, normal_reference, normal_pointer>;
   using const_iterator =
       btree_iterator<const_node, const_reference, const_pointer>;
 
@@ -896,72 +1135,60 @@
   using reference = Reference;
   using iterator_category = std::bidirectional_iterator_tag;
 
-  btree_iterator() : node(nullptr), position(-1) {}
-  explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
-  btree_iterator(Node *n, int p) : node(n), position(p) {}
+  btree_iterator() : btree_iterator(nullptr, -1) {}
+  explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
+  btree_iterator(Node *n, int p)
+      : btree_iterator_generation_info(n != nullptr ? n->generation()
+                                                    : ~uint32_t{}),
+        node_(n),
+        position_(p) {}
 
   // NOTE: this SFINAE allows for implicit conversions from iterator to
-  // const_iterator, but it specifically avoids defining copy constructors so
-  // that btree_iterator can be trivially copyable. This is for performance and
-  // binary size reasons.
+  // const_iterator, but it specifically avoids hiding the copy constructor so
+  // that the trivial one will be used when possible.
   template <typename N, typename R, typename P,
             absl::enable_if_t<
                 std::is_same<btree_iterator<N, R, P>, iterator>::value &&
                     std::is_same<btree_iterator, const_iterator>::value,
                 int> = 0>
-  btree_iterator(const btree_iterator<N, R, P> &other)  // NOLINT
-      : node(other.node), position(other.position) {}
+  btree_iterator(const btree_iterator<N, R, P> other)  // NOLINT
+      : btree_iterator_generation_info(other),
+        node_(other.node_),
+        position_(other.position_) {}
 
- private:
-  // This SFINAE allows explicit conversions from const_iterator to
-  // iterator, but also avoids defining a copy constructor.
-  // NOTE: the const_cast is safe because this constructor is only called by
-  // non-const methods and the container owns the nodes.
-  template <typename N, typename R, typename P,
-            absl::enable_if_t<
-                std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
-                    std::is_same<btree_iterator, iterator>::value,
-                int> = 0>
-  explicit btree_iterator(const btree_iterator<N, R, P> &other)
-      : node(const_cast<node_type *>(other.node)), position(other.position) {}
-
-  // Increment/decrement the iterator.
-  void increment() {
-    if (node->leaf() && ++position < node->finish()) {
-      return;
-    }
-    increment_slow();
-  }
-  void increment_slow();
-
-  void decrement() {
-    if (node->leaf() && --position >= node->start()) {
-      return;
-    }
-    decrement_slow();
-  }
-  void decrement_slow();
-
- public:
   bool operator==(const iterator &other) const {
-    return node == other.node && position == other.position;
+    return Equals(other);
   }
   bool operator==(const const_iterator &other) const {
-    return node == other.node && position == other.position;
+    return Equals(other);
   }
   bool operator!=(const iterator &other) const {
-    return node != other.node || position != other.position;
+    return !Equals(other);
   }
   bool operator!=(const const_iterator &other) const {
-    return node != other.node || position != other.position;
+    return !Equals(other);
+  }
+
+  // Returns n such that n calls to ++other yields *this.
+  // Precondition: n exists.
+  difference_type operator-(const_iterator other) const {
+    if (node_ == other.node_) {
+      if (node_->is_leaf()) return position_ - other.position_;
+      if (position_ == other.position_) return 0;
+    }
+    return distance_slow(other);
   }
 
   // Accessors for the key/value the iterator is pointing at.
   reference operator*() const {
-    ABSL_HARDENING_ASSERT(node != nullptr);
-    ABSL_HARDENING_ASSERT(node->start() <= position);
-    ABSL_HARDENING_ASSERT(node->finish() > position);
-    return node->value(position);
+    ABSL_HARDENING_ASSERT(node_ != nullptr);
+    assert_valid_generation(node_);
+    ABSL_HARDENING_ASSERT(position_ >= node_->start());
+    if (position_ >= node_->finish()) {
+      ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator");
+      ABSL_HARDENING_ASSERT(position_ < node_->finish());
+    }
+    return node_->value(static_cast<field_type>(position_));
   }
   pointer operator->() const { return &operator*(); }
 
@@ -985,6 +1212,8 @@
   }
 
  private:
+  friend iterator;
+  friend const_iterator;
   template <typename Params>
   friend class btree;
   template <typename Tree>
@@ -995,36 +1224,106 @@
   friend class btree_map_container;
   template <typename Tree>
   friend class btree_multiset_container;
-  template <typename N, typename R, typename P>
-  friend struct btree_iterator;
   template <typename TreeType, typename CheckerType>
   friend class base_checker;
+  friend struct btree_access;
 
-  const key_type &key() const { return node->key(position); }
-  slot_type *slot() { return node->slot(position); }
+  // This SFINAE allows explicit conversions from const_iterator to
+  // iterator, but also avoids hiding the copy constructor.
+  // NOTE: the const_cast is safe because this constructor is only called by
+  // non-const methods and the container owns the nodes.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+                    std::is_same<btree_iterator, iterator>::value,
+                int> = 0>
+  explicit btree_iterator(const btree_iterator<N, R, P> other)
+      : btree_iterator_generation_info(other.generation()),
+        node_(const_cast<node_type *>(other.node_)),
+        position_(other.position_) {}
+
+  bool Equals(const const_iterator other) const {
+    ABSL_HARDENING_ASSERT(((node_ == nullptr && other.node_ == nullptr) ||
+                           (node_ != nullptr && other.node_ != nullptr)) &&
+                          "Comparing default-constructed iterator with "
+                          "non-default-constructed iterator.");
+    // Note: we use assert instead of ABSL_HARDENING_ASSERT here because this
+    // changes the complexity of Equals from O(1) to O(log(N) + log(M)) where
+    // N/M are sizes of the containers containing node_/other.node_.
+    assert(AreNodesFromSameContainer(node_, other.node_) &&
+           "Comparing iterators from different containers.");
+    assert_valid_generation(node_);
+    other.assert_valid_generation(other.node_);
+    return node_ == other.node_ && position_ == other.position_;
+  }
+
+  bool IsEndIterator() const {
+    if (position_ != node_->finish()) return false;
+    node_type *node = node_;
+    while (!node->is_root()) {
+      if (node->position() != node->parent()->finish()) return false;
+      node = node->parent();
+    }
+    return true;
+  }
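+  // Illustrative note (not part of the upstream change): for end() of a
+  // non-empty tree, node_ is the rightmost leaf, position_ equals
+  // node_->finish(), and every ancestor is the last child of its parent, so
+  // the walk above reaches the root without returning false.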
+
+  // Returns n such that n calls to ++other yields *this.
+  // Precondition: n exists && (this->node_ != other.node_ ||
+  // !this->node_->is_leaf() || this->position_ != other.position_).
+  difference_type distance_slow(const_iterator other) const;
+
+  // Increment/decrement the iterator.
+  void increment() {
+    assert_valid_generation(node_);
+    if (node_->is_leaf() && ++position_ < node_->finish()) {
+      return;
+    }
+    increment_slow();
+  }
+  void increment_slow();
+
+  void decrement() {
+    assert_valid_generation(node_);
+    if (node_->is_leaf() && --position_ >= node_->start()) {
+      return;
+    }
+    decrement_slow();
+  }
+  void decrement_slow();
+
+  const key_type &key() const {
+    return node_->key(static_cast<size_type>(position_));
+  }
+  decltype(std::declval<Node *>()->slot(0)) slot() {
+    return node_->slot(static_cast<size_type>(position_));
+  }
+
+  void update_generation() {
+    btree_iterator_generation_info::update_generation(node_);
+  }
 
   // The node in the tree the iterator is pointing at.
-  Node *node;
+  Node *node_;
   // The position within the node of the tree the iterator is pointing at.
   // NOTE: this is an int rather than a field_type because iterators can point
   // to invalid positions (such as -1) in certain circumstances.
-  int position;
+  int position_;
 };
 
 template <typename Params>
 class btree {
   using node_type = btree_node<Params>;
   using is_key_compare_to = typename Params::is_key_compare_to;
-  using init_type = typename Params::init_type;
   using field_type = typename node_type::field_type;
-  using is_multi_container = typename Params::is_multi_container;
-  using is_key_compare_adapted = typename Params::is_key_compare_adapted;
 
   // We use a static empty node for the root/leftmost/rightmost of empty btrees
   // in order to avoid branching in begin()/end().
   struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
     using field_type = typename node_type::field_type;
     node_type *parent;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    uint32_t generation = 0;
+#endif
     field_type position = 0;
     field_type start = 0;
     field_type finish = 0;
@@ -1036,7 +1335,7 @@
     // MSVC has constexpr code generations bugs here.
     EmptyNodeType() : parent(this) {}
 #else
-    constexpr EmptyNodeType(node_type *p) : parent(p) {}
+    explicit constexpr EmptyNodeType(node_type *p) : parent(p) {}
 #endif
   };
 
@@ -1054,8 +1353,8 @@
   }
 
   enum : uint32_t {
-    kNodeValues = node_type::kNodeValues,
-    kMinNodeValues = kNodeValues / 2,
+    kNodeSlots = node_type::kNodeSlots,
+    kMinNodeValues = kNodeSlots / 2,
   };
 
   struct node_stats {
@@ -1079,13 +1378,15 @@
   using size_type = typename Params::size_type;
   using difference_type = typename Params::difference_type;
   using key_compare = typename Params::key_compare;
+  using original_key_compare = typename Params::original_key_compare;
   using value_compare = typename Params::value_compare;
   using allocator_type = typename Params::allocator_type;
   using reference = typename Params::reference;
   using const_reference = typename Params::const_reference;
   using pointer = typename Params::pointer;
   using const_pointer = typename Params::const_pointer;
-  using iterator = btree_iterator<node_type, reference, pointer>;
+  using iterator =
+      typename btree_iterator<node_type, reference, pointer>::iterator;
   using const_iterator = typename iterator::const_iterator;
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
@@ -1096,29 +1397,39 @@
   using slot_type = typename Params::slot_type;
 
  private:
-  // For use in copy_or_move_values_in_order.
-  const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
-  value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); }
-
   // Copies or moves (depending on the template parameter) the values in
   // other into this btree in their order in other. This btree must be empty
   // before this method is called. This method is used in copy construction,
   // copy assignment, and move assignment.
   template <typename Btree>
-  void copy_or_move_values_in_order(Btree *other);
+  void copy_or_move_values_in_order(Btree &other);
 
   // Validates that various assumptions/requirements are true at compile time.
   constexpr static bool static_assert_validation();
 
  public:
-  btree(const key_compare &comp, const allocator_type &alloc);
+  btree(const key_compare &comp, const allocator_type &alloc)
+      : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {}
 
-  btree(const btree &other);
+  btree(const btree &other) : btree(other, other.allocator()) {}
+  btree(const btree &other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    copy_or_move_values_in_order(other);
+  }
   btree(btree &&other) noexcept
-      : root_(std::move(other.root_)),
-        rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
-        size_(absl::exchange(other.size_, 0)) {
-    other.mutable_root() = EmptyNode();
+      : root_(absl::exchange(other.root_, EmptyNode())),
+        rightmost_(std::move(other.rightmost_)),
+        size_(absl::exchange(other.size_, 0u)) {
+    other.mutable_rightmost() = EmptyNode();
+  }
+  btree(btree &&other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    if (alloc == other.allocator()) {
+      swap(other);
+    } else {
+      // Move values from `other` one at a time when allocators are different.
+      copy_or_move_values_in_order(other);
+    }
   }
 
   ~btree() {
@@ -1134,9 +1445,9 @@
 
   iterator begin() { return iterator(leftmost()); }
   const_iterator begin() const { return const_iterator(leftmost()); }
-  iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+  iterator end() { return iterator(rightmost(), rightmost()->finish()); }
   const_iterator end() const {
-    return const_iterator(rightmost_, rightmost_->finish());
+    return const_iterator(rightmost(), rightmost()->finish());
   }
   reverse_iterator rbegin() { return reverse_iterator(end()); }
   const_reverse_iterator rbegin() const {
@@ -1147,17 +1458,22 @@
     return const_reverse_iterator(begin());
   }
 
-  // Finds the first element whose key is not less than key.
+  // Finds the first element whose key is not less than `key`.
   template <typename K>
   iterator lower_bound(const K &key) {
-    return internal_end(internal_lower_bound(key));
+    return internal_end(internal_lower_bound(key).value);
   }
   template <typename K>
   const_iterator lower_bound(const K &key) const {
-    return internal_end(internal_lower_bound(key));
+    return internal_end(internal_lower_bound(key).value);
   }
 
-  // Finds the first element whose key is greater than key.
+  // Finds the first element whose key is not less than `key` and also returns
+  // whether that element is equal to `key`.
+  template <typename K>
+  std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+  // Finds the first element whose key is greater than `key`.
   template <typename K>
   iterator upper_bound(const K &key) {
     return internal_end(internal_upper_bound(key));
@@ -1182,7 +1498,7 @@
   // Requirement: if `key` already exists in the btree, does not consume `args`.
   // Requirement: `key` is never referenced after consuming `args`.
   template <typename K, typename... Args>
-  std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
+  std::pair<iterator, bool> insert_unique(const K &key, Args &&...args);
 
   // Inserts with hint. Checks to see if the value should be placed immediately
   // before `position` in the tree. If so, then the insertion will take
@@ -1191,9 +1507,8 @@
   // Requirement: if `key` already exists in the btree, does not consume `args`.
   // Requirement: `key` is never referenced after consuming `args`.
   template <typename K, typename... Args>
-  std::pair<iterator, bool> insert_hint_unique(iterator position,
-                                               const K &key,
-                                               Args &&... args);
+  std::pair<iterator, bool> insert_hint_unique(iterator position, const K &key,
+                                               Args &&...args);
 
   // Insert a range of values into the btree.
   // Note: the first overload avoids constructing a value_type if the key
@@ -1227,7 +1542,7 @@
 
   // Insert a range of values into the btree.
   template <typename InputIterator>
-  void insert_iterator_multi(InputIterator b, InputIterator e);
+  void insert_iterator_multi(InputIterator b, InputIterator e);
 
   // Erase the specified iterator from the btree. The iterator must be valid
   // (i.e. not equal to end()).  Return an iterator pointing to the node after
@@ -1239,18 +1555,8 @@
   // to the element after the last erased element.
   std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
 
-  // Erases the specified key from the btree. Returns 1 if an element was
-  // erased and 0 otherwise.
-  template <typename K>
-  size_type erase_unique(const K &key);
-
-  // Erases all of the entries matching the specified key from the
-  // btree. Returns the number of elements erased.
-  template <typename K>
-  size_type erase_multi(const K &key);
-
-  // Finds the iterator corresponding to a key or returns end() if the key is
-  // not present.
+  // Finds an element with key equivalent to `key` or returns `end()` if `key`
+  // is not present.
   template <typename K>
   iterator find(const K &key) {
     return internal_end(internal_find(key));
@@ -1260,23 +1566,6 @@
     return internal_end(internal_find(key));
   }
 
-  // Returns a count of the number of times the key appears in the btree.
-  template <typename K>
-  size_type count_unique(const K &key) const {
-    const iterator begin = internal_find(key);
-    if (begin.node == nullptr) {
-      // The key doesn't exist in the tree.
-      return 0;
-    }
-    return 1;
-  }
-  // Returns a count of the number of times the key appears in the btree.
-  template <typename K>
-  size_type count_multi(const K &key) const {
-    const auto range = equal_range(key);
-    return std::distance(range.first, range.second);
-  }
-
   // Clear the btree, deleting all of the values it contains.
   void clear();
 
@@ -1284,14 +1573,16 @@
   void swap(btree &other);
 
   const key_compare &key_comp() const noexcept {
-    return root_.template get<0>();
+    return rightmost_.template get<0>();
   }
   template <typename K1, typename K2>
   bool compare_keys(const K1 &a, const K2 &b) const {
     return compare_internal::compare_result_as_less_than(key_comp()(a, b));
   }
 
-  value_compare value_comp() const { return value_compare(key_comp()); }
+  value_compare value_comp() const {
+    return value_compare(original_key_compare(key_comp()));
+  }
 
   // Verifies the structure of the btree.
   void verify() const;
@@ -1329,6 +1620,7 @@
   }
 
   // The total number of bytes used by the btree.
+  // TODO(b/169338300): update to support node_btree_*.
   size_type bytes_used() const {
     node_stats stats = internal_stats(root());
     if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
@@ -1339,12 +1631,13 @@
     }
   }
 
-  // The average number of bytes used per value stored in the btree.
+  // The average number of bytes used per value stored in the btree assuming
+  // random insertion order.
   static double average_bytes_per_value() {
-    // Returns the number of bytes per value on a leaf node that is 75%
-    // full. Experimentally, this matches up nicely with the computed number of
-    // bytes per value in trees that had their values inserted in random order.
-    return node_type::LeafSize() / (kNodeValues * 0.75);
+    // The expected number of values per node with random insertion order is the
+    // average of the maximum and minimum numbers of values per node.
+    const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0;
+    return node_type::LeafSize() / expected_values_per_node;
   }
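+  // Illustrative arithmetic (not part of the upstream change): if kNodeSlots
+  // were 62 and kMinNodeValues 31, expected_values_per_node above would be
+  // (62 + 31) / 2.0 == 46.5, giving an estimate of LeafSize() / 46.5.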
 
   // The fullness of the btree. Computed as the number of elements in the btree
@@ -1354,7 +1647,7 @@
   // Returns 0 for empty trees.
   double fullness() const {
     if (empty()) return 0.0;
-    return static_cast<double>(size()) / (nodes() * kNodeValues);
+    return static_cast<double>(size()) / (nodes() * kNodeSlots);
   }
   // The overhead of the btree structure in bytes per node. Computed as the
   // total number of bytes used by the btree minus the number of bytes used for
@@ -1370,11 +1663,20 @@
   allocator_type get_allocator() const { return allocator(); }
 
  private:
+  friend struct btree_access;
+
   // Internal accessor routines.
-  node_type *root() { return root_.template get<2>(); }
-  const node_type *root() const { return root_.template get<2>(); }
-  node_type *&mutable_root() noexcept { return root_.template get<2>(); }
-  key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+  node_type *root() { return root_; }
+  const node_type *root() const { return root_; }
+  node_type *&mutable_root() noexcept { return root_; }
+  node_type *rightmost() { return rightmost_.template get<2>(); }
+  const node_type *rightmost() const { return rightmost_.template get<2>(); }
+  node_type *&mutable_rightmost() noexcept {
+    return rightmost_.template get<2>();
+  }
+  key_compare *mutable_key_comp() noexcept {
+    return &rightmost_.template get<0>();
+  }
 
   // The leftmost node is stored as the parent of the root node.
   node_type *leftmost() { return root()->parent(); }
@@ -1382,34 +1684,34 @@
 
   // Allocator routines.
   allocator_type *mutable_allocator() noexcept {
-    return &root_.template get<1>();
+    return &rightmost_.template get<1>();
   }
   const allocator_type &allocator() const noexcept {
-    return root_.template get<1>();
+    return rightmost_.template get<1>();
   }
 
   // Allocates a correctly aligned node of at least size bytes using the
   // allocator.
-  node_type *allocate(const size_type size) {
+  node_type *allocate(size_type size) {
     return reinterpret_cast<node_type *>(
         absl::container_internal::Allocate<node_type::Alignment()>(
             mutable_allocator(), size));
   }
 
   // Node creation/deletion routines.
-  node_type *new_internal_node(node_type *parent) {
+  node_type *new_internal_node(field_type position, node_type *parent) {
     node_type *n = allocate(node_type::InternalSize());
-    n->init_internal(parent);
+    n->init_internal(position, parent);
     return n;
   }
-  node_type *new_leaf_node(node_type *parent) {
+  node_type *new_leaf_node(field_type position, node_type *parent) {
     node_type *n = allocate(node_type::LeafSize());
-    n->init_leaf(parent, kNodeValues);
+    n->init_leaf(position, kNodeSlots, parent);
     return n;
   }
-  node_type *new_leaf_root_node(const int max_count) {
+  node_type *new_leaf_root_node(field_type max_count) {
     node_type *n = allocate(node_type::LeafSize(max_count));
-    n->init_leaf(/*parent=*/n, max_count);
+    n->init_leaf(/*position=*/0, max_count, /*parent=*/n);
     return n;
   }
 
@@ -1433,48 +1735,38 @@
   void try_shrink();
 
   iterator internal_end(iterator iter) {
-    return iter.node != nullptr ? iter : end();
+    return iter.node_ != nullptr ? iter : end();
   }
   const_iterator internal_end(const_iterator iter) const {
-    return iter.node != nullptr ? iter : end();
+    return iter.node_ != nullptr ? iter : end();
   }
 
   // Emplaces a value into the btree immediately before iter. Requires that
   // key(v) <= iter.key() and (--iter).key() <= key(v).
   template <typename... Args>
-  iterator internal_emplace(iterator iter, Args &&... args);
+  iterator internal_emplace(iterator iter, Args &&...args);
 
   // Returns an iterator pointing to the first value >= the value "iter" is
   // pointing at. Note that "iter" might be pointing to an invalid location such
-  // as iter.position == iter.node->finish(). This routine simply moves iter up
-  // in the tree to a valid location.
-  // Requires: iter.node is non-null.
+  // as iter.position_ == iter.node_->finish(). This routine simply moves iter
+  // up in the tree to a valid location. Requires: iter.node_ is non-null.
   template <typename IterType>
   static IterType internal_last(IterType iter);
 
   // Returns an iterator pointing to the leaf position at which key would
-  // reside in the tree. We provide 2 versions of internal_locate. The first
-  // version uses a less-than comparator and is incapable of distinguishing when
-  // there is an exact match. The second version is for the key-compare-to
-  // specialization and distinguishes exact matches. The key-compare-to
-  // specialization allows the caller to avoid a subsequent comparison to
-  // determine if an exact match was made, which is important for keys with
-  // expensive comparison, such as strings.
+  // reside in the tree, unless there is an exact match - in which case, the
+  // result may not be on a leaf. When there's a three-way comparator, we can
+  // return whether there was an exact match. This allows the caller to avoid a
+  // subsequent comparison to determine if an exact match was made, which is
+  // important for keys with expensive comparison, such as strings.
   template <typename K>
   SearchResult<iterator, is_key_compare_to::value> internal_locate(
       const K &key) const;
 
-  template <typename K>
-  SearchResult<iterator, false> internal_locate_impl(
-      const K &key, std::false_type /* IsCompareTo */) const;
-
-  template <typename K>
-  SearchResult<iterator, true> internal_locate_impl(
-      const K &key, std::true_type /* IsCompareTo */) const;
-
   // Internal routine which implements lower_bound().
   template <typename K>
-  iterator internal_lower_bound(const K &key) const;
+  SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+      const K &key) const;
 
   // Internal routine which implements upper_bound().
   template <typename K>
@@ -1485,15 +1777,15 @@
   iterator internal_find(const K &key) const;
 
   // Verifies the tree structure of node.
-  int internal_verify(const node_type *node, const key_type *lo,
-                      const key_type *hi) const;
+  size_type internal_verify(const node_type *node, const key_type *lo,
+                            const key_type *hi) const;
 
   node_stats internal_stats(const node_type *node) const {
     // The root can be a static empty node.
     if (node == nullptr || (node == root() && empty())) {
       return node_stats(0, 0);
     }
-    if (node->leaf()) {
+    if (node->is_leaf()) {
       return node_stats(1, 0);
     }
     node_stats res(0, 1);
@@ -1503,22 +1795,14 @@
     return res;
   }
 
- public:
-  // Exposed only for tests.
-  static bool testonly_uses_linear_node_search() {
-    return node_type::testonly_uses_linear_node_search();
-  }
-
- private:
-  // We use compressed tuple in order to save space because key_compare and
-  // allocator_type are usually empty.
-  absl::container_internal::CompressedTuple<key_compare, allocator_type,
-                                            node_type *>
-      root_;
+  node_type *root_;
 
   // A pointer to the rightmost node. Note that the leftmost node is stored as
-  // the root's parent.
-  node_type *rightmost_;
+  // the root's parent. We use compressed tuple in order to save space because
+  // key_compare and allocator_type are usually empty.
+  absl::container_internal::CompressedTuple<key_compare, allocator_type,
+                                            node_type *>
+      rightmost_;
 
   // Number of values.
   size_type size_;
@@ -1528,9 +1812,9 @@
 // btree_node methods
 template <typename P>
 template <typename... Args>
-inline void btree_node<P>::emplace_value(const size_type i,
+inline void btree_node<P>::emplace_value(const field_type i,
                                          allocator_type *alloc,
-                                         Args &&... args) {
+                                         Args &&...args) {
   assert(i >= start());
   assert(i <= finish());
   // Shift old values to create space for new value and then construct it in
@@ -1539,11 +1823,11 @@
     transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
                         alloc);
   }
-  value_init(i, alloc, std::forward<Args>(args)...);
+  value_init(static_cast<field_type>(i), alloc, std::forward<Args>(args)...);
   set_finish(finish() + 1);
 
-  if (!leaf() && finish() > i + 1) {
-    for (int j = finish(); j > i + 1; --j) {
+  if (is_internal() && finish() > i + 1) {
+    for (field_type j = finish(); j > i + 1; --j) {
       set_child(j, child(j - 1));
     }
     clear_child(i + 1);
@@ -1560,13 +1844,13 @@
   const field_type src_i = i + to_erase;
   transfer_n(orig_finish - src_i, i, src_i, this, alloc);
 
-  if (!leaf()) {
+  if (is_internal()) {
     // Delete all children between begin and end.
-    for (int j = 0; j < to_erase; ++j) {
+    for (field_type j = 0; j < to_erase; ++j) {
       clear_and_delete(child(i + j + 1), alloc);
     }
     // Rotate children after end into new positions.
-    for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+    for (field_type j = i + to_erase + 1; j <= orig_finish; ++j) {
       set_child(j - to_erase, child(j));
       clear_child(j);
     }
@@ -1575,7 +1859,7 @@
 }
 
 template <typename P>
-void btree_node<P>::rebalance_right_to_left(const int to_move,
+void btree_node<P>::rebalance_right_to_left(field_type to_move,
                                             btree_node *right,
                                             allocator_type *alloc) {
   assert(parent() == right->parent());
@@ -1597,12 +1881,12 @@
   right->transfer_n(right->count() - to_move, right->start(),
                     right->start() + to_move, right, alloc);
 
-  if (!leaf()) {
+  if (is_internal()) {
     // Move the child pointers from the right to the left node.
-    for (int i = 0; i < to_move; ++i) {
+    for (field_type i = 0; i < to_move; ++i) {
       init_child(finish() + i + 1, right->child(i));
     }
-    for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+    for (field_type i = right->start(); i <= right->finish() - to_move; ++i) {
       assert(i + to_move <= right->max_count());
       right->init_child(i, right->child(i + to_move));
       right->clear_child(i + to_move);
@@ -1615,7 +1899,7 @@
 }
 
 template <typename P>
-void btree_node<P>::rebalance_left_to_right(const int to_move,
+void btree_node<P>::rebalance_left_to_right(field_type to_move,
                                             btree_node *right,
                                             allocator_type *alloc) {
   assert(parent() == right->parent());
@@ -1644,13 +1928,13 @@
   // 4) Move the new delimiting value to the parent from the left node.
   parent()->transfer(position(), finish() - to_move, this, alloc);
 
-  if (!leaf()) {
+  if (is_internal()) {
     // Move the child pointers from the left to the right node.
-    for (int i = right->finish(); i >= right->start(); --i) {
-      right->init_child(i + to_move, right->child(i));
-      right->clear_child(i);
+    for (field_type i = right->finish() + 1; i > right->start(); --i) {
+      right->init_child(i - 1 + to_move, right->child(i - 1));
+      right->clear_child(i - 1);
     }
-    for (int i = 1; i <= to_move; ++i) {
+    for (field_type i = 1; i <= to_move; ++i) {
       right->init_child(i - 1, child(finish() - to_move + i));
       clear_child(finish() - to_move + i);
     }
@@ -1665,7 +1949,9 @@
 void btree_node<P>::split(const int insert_position, btree_node *dest,
                           allocator_type *alloc) {
   assert(dest->count() == 0);
-  assert(max_count() == kNodeValues);
+  assert(max_count() == kNodeSlots);
+  assert(position() + 1 == dest->position());
+  assert(parent() == dest->parent());
 
   // We bias the split based on the position being inserted. If we're
   // inserting at the beginning of the left node then bias the split to put
@@ -1673,7 +1959,7 @@
   // right node then bias the split to put more values on the left node.
   if (insert_position == start()) {
     dest->set_finish(dest->start() + finish() - 1);
-  } else if (insert_position == kNodeValues) {
+  } else if (insert_position == kNodeSlots) {
     dest->set_finish(dest->start());
   } else {
     dest->set_finish(dest->start() + count() / 2);
@@ -1688,10 +1974,10 @@
   --mutable_finish();
   parent()->emplace_value(position(), alloc, finish_slot());
   value_destroy(finish(), alloc);
-  parent()->init_child(position() + 1, dest);
+  parent()->set_child_noupdate_position(position() + 1, dest);
 
-  if (!leaf()) {
-    for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+  if (is_internal()) {
+    for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish();
          ++i, ++j) {
       assert(child(j) != nullptr);
       dest->init_child(i, child(j));
@@ -1711,9 +1997,10 @@
   // Move the values from the right to the left node.
   transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
 
-  if (!leaf()) {
+  if (is_internal()) {
     // Move the child pointers from the right to the left node.
-    for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
+    for (field_type i = src->start(), j = finish() + 1; i <= src->finish();
+         ++i, ++j) {
       init_child(j, src->child(i));
       src->clear_child(i);
     }
@@ -1729,7 +2016,7 @@
 
 template <typename P>
 void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
-  if (node->leaf()) {
+  if (node->is_leaf()) {
     node->value_destroy_n(node->start(), node->count(), alloc);
     deallocate(LeafSize(node->max_count()), node, alloc);
     return;
@@ -1743,24 +2030,35 @@
   btree_node *delete_root_parent = node->parent();
 
   // Navigate to the leftmost leaf under node, and then delete upwards.
-  while (!node->leaf()) node = node->start_child();
-  // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
-  // isn't guaranteed to be a valid `field_type`.
-  int pos = node->position();
+  while (node->is_internal()) node = node->start_child();
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+  // When generations are enabled, we delete the leftmost leaf last in case it's
+  // the parent of the root and we need to check whether it's a leaf before we
+  // can update the root's generation.
+  // TODO(ezb): if we change btree_node::is_root to check a bool inside the node
+  // instead of checking whether the parent is a leaf, we can remove this logic.
+  btree_node *leftmost_leaf = node;
+#endif
+  // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`,
+  // which isn't guaranteed to be a valid `field_type`.
+  size_type pos = node->position();
   btree_node *parent = node->parent();
   for (;;) {
     // In each iteration of the next loop, we delete one leaf node and go right.
     assert(pos <= parent->finish());
     do {
-      node = parent->child(pos);
-      if (!node->leaf()) {
+      node = parent->child(static_cast<field_type>(pos));
+      if (node->is_internal()) {
         // Navigate to the leftmost leaf under node.
-        while (!node->leaf()) node = node->start_child();
+        while (node->is_internal()) node = node->start_child();
         pos = node->position();
         parent = node->parent();
       }
       node->value_destroy_n(node->start(), node->count(), alloc);
-      deallocate(LeafSize(node->max_count()), node, alloc);
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+      if (leftmost_leaf != node)
+#endif
+        deallocate(LeafSize(node->max_count()), node, alloc);
       ++pos;
     } while (pos <= parent->finish());
 
@@ -1772,7 +2070,12 @@
       parent = node->parent();
       node->value_destroy_n(node->start(), node->count(), alloc);
       deallocate(InternalSize(), node, alloc);
-      if (parent == delete_root_parent) return;
+      if (parent == delete_root_parent) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+        deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc);
+#endif
+        return;
+      }
       ++pos;
     } while (pos > parent->finish());
   }
@@ -1780,51 +2083,109 @@
 
 ////
 // btree_iterator methods
+
+// Note: the implementation here is based on btree_node::clear_and_delete.
+template <typename N, typename R, typename P>
+auto btree_iterator<N, R, P>::distance_slow(const_iterator other) const
+    -> difference_type {
+  const_iterator begin = other;
+  const_iterator end = *this;
+  assert(begin.node_ != end.node_ || !begin.node_->is_leaf() ||
+         begin.position_ != end.position_);
+
+  const node_type *node = begin.node_;
+  // We need to compensate for double counting if begin.node_ is a leaf node.
+  difference_type count = node->is_leaf() ? -begin.position_ : 0;
+
+  // First navigate to the leftmost leaf node past begin.
+  if (node->is_internal()) {
+    ++count;
+    node = node->child(begin.position_ + 1);
+  }
+  while (node->is_internal()) node = node->start_child();
+
+  // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`,
+  // which isn't guaranteed to be a valid `field_type`.
+  size_type pos = node->position();
+  const node_type *parent = node->parent();
+  for (;;) {
+    // In each iteration of the next loop, we count one leaf node and go right.
+    assert(pos <= parent->finish());
+    do {
+      node = parent->child(static_cast<field_type>(pos));
+      if (node->is_internal()) {
+        // Navigate to the leftmost leaf under node.
+        while (node->is_internal()) node = node->start_child();
+        pos = node->position();
+        parent = node->parent();
+      }
+      if (node == end.node_) return count + end.position_;
+      if (parent == end.node_ && pos == static_cast<size_type>(end.position_))
+        return count + node->count();
+      // +1 is for the next internal node value.
+      count += node->count() + 1;
+      ++pos;
+    } while (pos <= parent->finish());
+
+    // Once we've counted all children of parent, go up/right.
+    assert(pos > parent->finish());
+    do {
+      node = parent;
+      pos = node->position();
+      parent = node->parent();
+      // -1 because we counted the value at end and shouldn't.
+      if (parent == end.node_ && pos == static_cast<size_type>(end.position_))
+        return count - 1;
+      ++pos;
+    } while (pos > parent->finish());
+  }
+}
+
 template <typename N, typename R, typename P>
 void btree_iterator<N, R, P>::increment_slow() {
-  if (node->leaf()) {
-    assert(position >= node->finish());
+  if (node_->is_leaf()) {
+    assert(position_ >= node_->finish());
     btree_iterator save(*this);
-    while (position == node->finish() && !node->is_root()) {
-      assert(node->parent()->child(node->position()) == node);
-      position = node->position();
-      node = node->parent();
+    while (position_ == node_->finish() && !node_->is_root()) {
+      assert(node_->parent()->child(node_->position()) == node_);
+      position_ = node_->position();
+      node_ = node_->parent();
     }
     // TODO(ezb): assert we aren't incrementing end() instead of handling.
-    if (position == node->finish()) {
+    if (position_ == node_->finish()) {
       *this = save;
     }
   } else {
-    assert(position < node->finish());
-    node = node->child(position + 1);
-    while (!node->leaf()) {
-      node = node->start_child();
+    assert(position_ < node_->finish());
+    node_ = node_->child(static_cast<field_type>(position_ + 1));
+    while (node_->is_internal()) {
+      node_ = node_->start_child();
     }
-    position = node->start();
+    position_ = node_->start();
   }
 }
 
 template <typename N, typename R, typename P>
 void btree_iterator<N, R, P>::decrement_slow() {
-  if (node->leaf()) {
-    assert(position <= -1);
+  if (node_->is_leaf()) {
+    assert(position_ <= -1);
     btree_iterator save(*this);
-    while (position < node->start() && !node->is_root()) {
-      assert(node->parent()->child(node->position()) == node);
-      position = node->position() - 1;
-      node = node->parent();
+    while (position_ < node_->start() && !node_->is_root()) {
+      assert(node_->parent()->child(node_->position()) == node_);
+      position_ = node_->position() - 1;
+      node_ = node_->parent();
     }
     // TODO(ezb): assert we aren't decrementing begin() instead of handling.
-    if (position < node->start()) {
+    if (position_ < node_->start()) {
       *this = save;
     }
   } else {
-    assert(position >= node->start());
-    node = node->child(position);
-    while (!node->leaf()) {
-      node = node->child(node->finish());
+    assert(position_ >= node_->start());
+    node_ = node_->child(static_cast<field_type>(position_));
+    while (node_->is_internal()) {
+      node_ = node_->child(node_->finish());
     }
-    position = node->finish() - 1;
+    position_ = node_->finish() - 1;
   }
 }
 
@@ -1832,7 +2193,7 @@
 // btree methods
 template <typename P>
 template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree *other) {
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
   static_assert(std::is_same<btree, Btree>::value ||
                     std::is_same<const btree, Btree>::value,
                 "Btree type must be same or const.");
@@ -1840,14 +2201,14 @@
 
   // We can avoid key comparisons because we know the order of the
   // values is the same order we'll store them in.
-  auto iter = other->begin();
-  if (iter == other->end()) return;
-  insert_multi(maybe_move_from_iterator(iter));
+  auto iter = other.begin();
+  if (iter == other.end()) return;
+  insert_multi(iter.slot());
   ++iter;
-  for (; iter != other->end(); ++iter) {
+  for (; iter != other.end(); ++iter) {
     // If the btree is not empty, we can just insert the new value at the end
     // of the tree.
-    internal_emplace(end(), maybe_move_from_iterator(iter));
+    internal_emplace(end(), iter.slot());
   }
 }
 
@@ -1857,25 +2218,22 @@
                 "Key comparison must be nothrow copy constructible");
   static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
                 "Allocator must be nothrow copy constructible");
-  static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+  static_assert(std::is_trivially_copyable<iterator>::value,
                 "iterator not trivially copyable.");
 
   // Note: We assert that kTargetValues, which is computed from
   // Params::kTargetNodeSize, must fit the node_type::field_type.
   static_assert(
-      kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))),
+      kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
       "target node size too large");
 
   // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
-  using compare_result_type =
-      absl::result_of_t<key_compare(key_type, key_type)>;
   static_assert(
-      std::is_same<compare_result_type, bool>::value ||
-          std::is_convertible<compare_result_type, absl::weak_ordering>::value,
+      compare_has_valid_result_type<key_compare, key_type>(),
       "key comparison function must return absl::{weak,strong}_ordering or "
       "bool.");
 
-  // Test the assumption made in setting kNodeValueSpace.
+  // Test the assumption made in setting kNodeSlotSpace.
   static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
                 "node space assumption incorrect");
 
@@ -1883,31 +2241,29 @@
 }
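The static_assert above now accepts any comparator whose result type is bool or convertible to absl::weak_ordering. A hedged sketch of a user-supplied three-way comparator that satisfies it and opts into the key-compare-to search path (Abseil already adapts the default std::string comparators this way internally, so this is only needed for custom key types):

#include <string>
#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"

// Sketch: returning absl::weak_ordering instead of bool lets one comparator
// call both order the keys and detect equality during node search.
struct ThreeWayStrCmp {
  using is_transparent = void;
  absl::weak_ordering operator()(absl::string_view a,
                                 absl::string_view b) const {
    const int c = a.compare(b);
    return c < 0   ? absl::weak_ordering::less
           : c > 0 ? absl::weak_ordering::greater
                   : absl::weak_ordering::equivalent;
  }
};

using StringMap = absl::btree_map<std::string, int, ThreeWayStrCmp>;
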
 
 template <typename P>
-btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
-    : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
-
-template <typename P>
-btree<P>::btree(const btree &other)
-    : btree(other.key_comp(), other.allocator()) {
-  copy_or_move_values_in_order(&other);
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+    -> std::pair<iterator, bool> {
+  const SearchResult<iterator, is_key_compare_to::value> res =
+      internal_lower_bound(key);
+  const iterator lower = iterator(internal_end(res.value));
+  const bool equal = res.HasMatch()
+                         ? res.IsEq()
+                         : lower != end() && !compare_keys(key, lower.key());
+  return {lower, equal};
 }
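lower_bound_equal() is the building block used by the equal_range() overload below and by the container-level extract()/erase(key) paths: one downward search yields both the lower bound and whether an equivalent key exists. A rough public-API equivalent for a concrete map type (hypothetical helper, not part of Abseil):

#include <string>
#include <utility>
#include "absl/container/btree_map.h"

using IntMap = absl::btree_map<std::string, int>;

// Hypothetical illustration: lower_bound plus one extra comparison, which is
// what the internal routine can skip when a three-way comparator already
// reported an exact match on the way down.
std::pair<IntMap::iterator, bool> LowerBoundEqual(IntMap& m,
                                                  const std::string& k) {
  auto lower = m.lower_bound(k);
  const bool equal = lower != m.end() && !m.key_comp()(k, lower->first);
  return {lower, equal};
}
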
 
 template <typename P>
 template <typename K>
 auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
-  const iterator lower = lower_bound(key);
-  // TODO(ezb): we should be able to avoid this comparison when there's a
-  // three-way comparator.
-  if (lower == end() || compare_keys(key, lower.key())) return {lower, lower};
+  const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+  const iterator lower = lower_and_equal.first;
+  if (!lower_and_equal.second) {
+    return {lower, lower};
+  }
 
   const iterator next = std::next(lower);
-  // When the comparator is heterogeneous, we can't assume that comparison with
-  // non-`key_type` will be equivalent to `key_type` comparisons so there
-  // could be multiple equivalent keys even in a unique-container. But for
-  // heterogeneous comparisons from the default string adapted comparators, we
-  // don't need to worry about this.
-  if (!is_multi_container::value &&
-      (std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
+  if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
     // The next iterator after lower must point to a key greater than `key`.
     // Note: if this assert fails, then it may indicate that the comparator does
     // not meet the equivalence requirements for Compare
@@ -1918,7 +2274,7 @@
   // Try once more to avoid the call to upper_bound() if there's only one
   // equivalent key. This should prevent all calls to upper_bound() in cases of
   // unique-containers with heterogeneous comparators in which all comparison
-  // operators are equivalent.
+  // operators have the same equivalence classes.
   if (next == end() || compare_keys(key, next.key())) return {lower, next};
 
   // In this case, we need to call upper_bound() to avoid worst case O(N)
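Per the reasoning above, upper_bound() is only reached when several equivalent keys can actually exist. A small usage example in which that branch is taken:

#include <iterator>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> ms = {1, 2, 2, 2, 3};
  auto range = ms.equal_range(2);
  // All three equivalent keys fall in [range.first, range.second). For a
  // unique container with the default comparators, the early returns above
  // usually avoid calling upper_bound() at all.
  auto n = std::distance(range.first, range.second);  // n == 3
  (void)n;
}
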
@@ -1928,14 +2284,14 @@
 
 template <typename P>
 template <typename K, typename... Args>
-auto btree<P>::insert_unique(const K &key, Args &&... args)
+auto btree<P>::insert_unique(const K &key, Args &&...args)
     -> std::pair<iterator, bool> {
   if (empty()) {
-    mutable_root() = rightmost_ = new_leaf_root_node(1);
+    mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
   }
 
-  auto res = internal_locate(key);
-  iterator &iter = res.value;
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  iterator iter = res.value;
 
   if (res.HasMatch()) {
     if (res.IsEq()) {
@@ -1944,7 +2300,7 @@
     }
   } else {
     iterator last = internal_last(iter);
-    if (last.node && !compare_keys(key, last.key())) {
+    if (last.node_ && !compare_keys(key, last.key())) {
       // The key already exists in the tree, do nothing.
       return {last, false};
     }
@@ -1955,7 +2311,7 @@
 template <typename P>
 template <typename K, typename... Args>
 inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
-                                         Args &&... args)
+                                         Args &&...args)
     -> std::pair<iterator, bool> {
   if (!empty()) {
     if (position == end() || compare_keys(key, position.key())) {
@@ -1989,8 +2345,11 @@
 template <typename InputIterator>
 void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
   for (; b != e; ++b) {
-    init_type value(*b);
-    insert_hint_unique(end(), params_type::key(value), std::move(value));
+    // Use a node handle to manage a temp slot.
+    auto node_handle =
+        CommonAccess::Construct<node_handle_type>(get_allocator(), *b);
+    slot_type *slot = CommonAccess::GetSlot(node_handle);
+    insert_hint_unique(end(), params_type::key(slot), slot);
   }
 }
 
@@ -1998,11 +2357,11 @@
 template <typename ValueType>
 auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
   if (empty()) {
-    mutable_root() = rightmost_ = new_leaf_root_node(1);
+    mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
   }
 
   iterator iter = internal_upper_bound(key);
-  if (iter.node == nullptr) {
+  if (iter.node_ == nullptr) {
     iter = end();
   }
   return internal_emplace(iter, std::forward<ValueType>(v));
@@ -2049,7 +2408,7 @@
       *mutable_allocator() = other.allocator();
     }
 
-    copy_or_move_values_in_order(&other);
+    copy_or_move_values_in_order(other);
   }
   return *this;
 }
@@ -2062,15 +2421,15 @@
     using std::swap;
     if (absl::allocator_traits<
             allocator_type>::propagate_on_container_copy_assignment::value) {
-      // Note: `root_` also contains the allocator and the key comparator.
       swap(root_, other.root_);
+      // Note: `rightmost_` also contains the allocator and the key comparator.
       swap(rightmost_, other.rightmost_);
       swap(size_, other.size_);
     } else {
       if (allocator() == other.allocator()) {
         swap(mutable_root(), other.mutable_root());
         swap(*mutable_key_comp(), *other.mutable_key_comp());
-        swap(rightmost_, other.rightmost_);
+        swap(mutable_rightmost(), other.mutable_rightmost());
         swap(size_, other.size_);
       } else {
         // We aren't allowed to propagate the allocator and the allocator is
@@ -2079,7 +2438,7 @@
         // comparator while moving the values so we can't swap the key
         // comparators.
         *mutable_key_comp() = other.key_comp();
-        copy_or_move_values_in_order(&other);
+        copy_or_move_values_in_order(other);
       }
     }
   }
@@ -2088,22 +2447,34 @@
 
 template <typename P>
 auto btree<P>::erase(iterator iter) -> iterator {
-  bool internal_delete = false;
-  if (!iter.node->leaf()) {
-    // Deletion of a value on an internal node. First, move the largest value
-    // from our left child here, then delete that position (in remove_values()
-    // below). We can get to the largest value from our left child by
-    // decrementing iter.
+  iter.node_->value_destroy(static_cast<field_type>(iter.position_),
+                            mutable_allocator());
+  iter.update_generation();
+
+  const bool internal_delete = iter.node_->is_internal();
+  if (internal_delete) {
+    // Deletion of a value on an internal node. First, transfer the largest
+    // value from our left child here, then erase/rebalance from that position.
+    // We can get to the largest value from our left child by decrementing iter.
     iterator internal_iter(iter);
     --iter;
-    assert(iter.node->leaf());
-    params_type::move(mutable_allocator(), iter.node->slot(iter.position),
-                      internal_iter.node->slot(internal_iter.position));
-    internal_delete = true;
+    assert(iter.node_->is_leaf());
+    internal_iter.node_->transfer(
+        static_cast<size_type>(internal_iter.position_),
+        static_cast<size_type>(iter.position_), iter.node_,
+        mutable_allocator());
+  } else {
+    // Shift values after erased position in leaf. In the internal case, we
+    // don't need to do this because the leaf position is the end of the node.
+    const field_type transfer_from =
+        static_cast<field_type>(iter.position_ + 1);
+    const field_type num_to_transfer = iter.node_->finish() - transfer_from;
+    iter.node_->transfer_n(num_to_transfer,
+                           static_cast<size_type>(iter.position_),
+                           transfer_from, iter.node_, mutable_allocator());
   }
-
-  // Delete the key from the leaf.
-  iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
+  // Update node finish and container size.
+  iter.node_->set_finish(iter.node_->finish() - 1);
   --size_;
 
   // We want to return the next value after the one we just erased. If we
@@ -2111,7 +2482,7 @@
   // value is ++(++iter). If we erased from a leaf node (internal_delete ==
   // false) then the next value is ++iter. Note that ++iter may point to an
   // internal node and the value in the internal node may move to a leaf node
-  // (iter.node) when rebalancing is performed at the leaf level.
+  // (iter.node_) when rebalancing is performed at the leaf level.
 
   iterator res = rebalance_after_delete(iter);
 
@@ -2128,14 +2499,14 @@
   iterator res(iter);
   bool first_iteration = true;
   for (;;) {
-    if (iter.node == root()) {
+    if (iter.node_ == root()) {
       try_shrink();
       if (empty()) {
         return end();
       }
       break;
     }
-    if (iter.node->count() >= kMinNodeValues) {
+    if (iter.node_->count() >= kMinNodeValues) {
       break;
     }
     bool merged = try_merge_or_rebalance(&iter);
@@ -2148,14 +2519,15 @@
     if (!merged) {
       break;
     }
-    iter.position = iter.node->position();
-    iter.node = iter.node->parent();
+    iter.position_ = iter.node_->position();
+    iter.node_ = iter.node_->parent();
   }
+  res.update_generation();
 
   // Adjust our return value. If we're pointing at the end of a node, advance
   // the iterator.
-  if (res.position == res.node->finish()) {
-    res.position = res.node->finish() - 1;
+  if (res.position_ == res.node_->finish()) {
+    res.position_ = res.node_->finish() - 1;
     ++res;
   }
 
@@ -2165,75 +2537,54 @@
 template <typename P>
 auto btree<P>::erase_range(iterator begin, iterator end)
     -> std::pair<size_type, iterator> {
-  difference_type count = std::distance(begin, end);
+  size_type count = static_cast<size_type>(end - begin);
   assert(count >= 0);
 
   if (count == 0) {
     return {0, begin};
   }
 
-  if (count == size_) {
+  if (static_cast<size_type>(count) == size_) {
     clear();
     return {count, this->end()};
   }
 
-  if (begin.node == end.node) {
-    assert(end.position > begin.position);
-    begin.node->remove_values(begin.position, end.position - begin.position,
-                              mutable_allocator());
+  if (begin.node_ == end.node_) {
+    assert(end.position_ > begin.position_);
+    begin.node_->remove_values(
+        static_cast<field_type>(begin.position_),
+        static_cast<field_type>(end.position_ - begin.position_),
+        mutable_allocator());
     size_ -= count;
     return {count, rebalance_after_delete(begin)};
   }
 
   const size_type target_size = size_ - count;
   while (size_ > target_size) {
-    if (begin.node->leaf()) {
+    if (begin.node_->is_leaf()) {
       const size_type remaining_to_erase = size_ - target_size;
-      const size_type remaining_in_node = begin.node->finish() - begin.position;
-      const size_type to_erase =
-          (std::min)(remaining_to_erase, remaining_in_node);
-      begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+      const size_type remaining_in_node =
+          static_cast<size_type>(begin.node_->finish() - begin.position_);
+      const field_type to_erase = static_cast<field_type>(
+          (std::min)(remaining_to_erase, remaining_in_node));
+      begin.node_->remove_values(static_cast<field_type>(begin.position_),
+                                 to_erase, mutable_allocator());
       size_ -= to_erase;
       begin = rebalance_after_delete(begin);
     } else {
       begin = erase(begin);
     }
   }
+  begin.update_generation();
   return {count, begin};
 }
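erase_range() batches removals that share a leaf node into a single remove_values() call and only falls back to per-element erase() when begin sits on an internal node. At the container level this is reached through erase(first, last); a brief sketch:

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> m;
  for (int i = 0; i < 100; ++i) m[i] = i;
  // Forwarded to btree::erase_range(); values sharing a leaf are removed
  // together rather than erased and rebalanced one at a time.
  m.erase(m.lower_bound(10), m.lower_bound(90));
  // m.size() == 20 afterwards.
}
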
 
 template <typename P>
-template <typename K>
-auto btree<P>::erase_unique(const K &key) -> size_type {
-  const iterator iter = internal_find(key);
-  if (iter.node == nullptr) {
-    // The key doesn't exist in the tree, return nothing done.
-    return 0;
-  }
-  erase(iter);
-  return 1;
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::erase_multi(const K &key) -> size_type {
-  const iterator begin = internal_lower_bound(key);
-  if (begin.node == nullptr) {
-    // The key doesn't exist in the tree, return nothing done.
-    return 0;
-  }
-  // Delete all of the keys between begin and upper_bound(key).
-  const iterator end = internal_end(internal_upper_bound(key));
-  return erase_range(begin, end).first;
-}
-
-template <typename P>
 void btree<P>::clear() {
   if (!empty()) {
     node_type::clear_and_delete(root(), mutable_allocator());
   }
-  mutable_root() = EmptyNode();
-  rightmost_ = EmptyNode();
+  mutable_root() = mutable_rightmost() = EmptyNode();
   size_ = 0;
 }
 
@@ -2242,15 +2593,15 @@
   using std::swap;
   if (absl::allocator_traits<
           allocator_type>::propagate_on_container_swap::value) {
-    // Note: `root_` also contains the allocator and the key comparator.
-    swap(root_, other.root_);
+    // Note: `rightmost_` also contains the allocator and the key comparator.
+    swap(rightmost_, other.rightmost_);
   } else {
     // It's undefined behavior if the allocators are unequal here.
     assert(allocator() == other.allocator());
-    swap(mutable_root(), other.mutable_root());
+    swap(mutable_rightmost(), other.mutable_rightmost());
     swap(*mutable_key_comp(), *other.mutable_key_comp());
   }
-  swap(rightmost_, other.rightmost_);
+  swap(mutable_root(), other.mutable_root());
   swap(size_, other.size_);
 }
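The swap above exchanges root_, rightmost_ (which now also carries the comparator and allocator), and size_; when allocators do not propagate and compare equal, the pieces are swapped individually. Container-level usage is unchanged; a trivial sketch:

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> a = {1, 2, 3};
  absl::btree_set<int> b = {4, 5};
  a.swap(b);  // O(1): no elements are moved, only the tree handles.
}
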
 
@@ -2258,20 +2609,20 @@
 void btree<P>::verify() const {
   assert(root() != nullptr);
   assert(leftmost() != nullptr);
-  assert(rightmost_ != nullptr);
+  assert(rightmost() != nullptr);
   assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
-  assert(leftmost() == (++const_iterator(root(), -1)).node);
-  assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
-  assert(leftmost()->leaf());
-  assert(rightmost_->leaf());
+  assert(leftmost() == (++const_iterator(root(), -1)).node_);
+  assert(rightmost() == (--const_iterator(root(), root()->finish())).node_);
+  assert(leftmost()->is_leaf());
+  assert(rightmost()->is_leaf());
 }
 
 template <typename P>
 void btree<P>::rebalance_or_split(iterator *iter) {
-  node_type *&node = iter->node;
-  int &insert_position = iter->position;
+  node_type *&node = iter->node_;
+  int &insert_position = iter->position_;
   assert(node->count() == node->max_count());
-  assert(kNodeValues == node->max_count());
+  assert(kNodeSlots == node->max_count());
 
   // First try to make room on the node by rebalancing.
   node_type *parent = node->parent();
@@ -2279,21 +2630,24 @@
     if (node->position() > parent->start()) {
       // Try rebalancing with our left sibling.
       node_type *left = parent->child(node->position() - 1);
-      assert(left->max_count() == kNodeValues);
-      if (left->count() < kNodeValues) {
+      assert(left->max_count() == kNodeSlots);
+      if (left->count() < kNodeSlots) {
         // We bias rebalancing based on the position being inserted. If we're
         // inserting at the end of the right node then we bias rebalancing to
         // fill up the left node.
-        int to_move = (kNodeValues - left->count()) /
-                      (1 + (insert_position < kNodeValues));
-        to_move = (std::max)(1, to_move);
+        field_type to_move =
+            (kNodeSlots - left->count()) /
+            (1 + (static_cast<field_type>(insert_position) < kNodeSlots));
+        to_move = (std::max)(field_type{1}, to_move);
 
-        if (insert_position - to_move >= node->start() ||
-            left->count() + to_move < kNodeValues) {
+        if (static_cast<field_type>(insert_position) - to_move >=
+                node->start() ||
+            left->count() + to_move < kNodeSlots) {
           left->rebalance_right_to_left(to_move, node, mutable_allocator());
 
           assert(node->max_count() - node->count() == to_move);
-          insert_position = insert_position - to_move;
+          insert_position = static_cast<int>(
+              static_cast<field_type>(insert_position) - to_move);
           if (insert_position < node->start()) {
             insert_position = insert_position + left->count() + 1;
             node = left;
@@ -2308,17 +2662,18 @@
     if (node->position() < parent->finish()) {
       // Try rebalancing with our right sibling.
       node_type *right = parent->child(node->position() + 1);
-      assert(right->max_count() == kNodeValues);
-      if (right->count() < kNodeValues) {
+      assert(right->max_count() == kNodeSlots);
+      if (right->count() < kNodeSlots) {
         // We bias rebalancing based on the position being inserted. If we're
         // inserting at the beginning of the left node then we bias rebalancing
         // to fill up the right node.
-        int to_move = (kNodeValues - right->count()) /
-                      (1 + (insert_position > node->start()));
-        to_move = (std::max)(1, to_move);
+        field_type to_move = (kNodeSlots - right->count()) /
+                             (1 + (insert_position > node->start()));
+        to_move = (std::max)(field_type{1}, to_move);
 
-        if (insert_position <= node->finish() - to_move ||
-            right->count() + to_move < kNodeValues) {
+        if (static_cast<field_type>(insert_position) <=
+                node->finish() - to_move ||
+            right->count() + to_move < kNodeSlots) {
           node->rebalance_left_to_right(to_move, right, mutable_allocator());
 
           if (insert_position > node->finish()) {
@@ -2334,31 +2689,33 @@
 
     // Rebalancing failed, make sure there is room on the parent node for a new
     // value.
-    assert(parent->max_count() == kNodeValues);
-    if (parent->count() == kNodeValues) {
-      iterator parent_iter(node->parent(), node->position());
+    assert(parent->max_count() == kNodeSlots);
+    if (parent->count() == kNodeSlots) {
+      iterator parent_iter(parent, node->position());
       rebalance_or_split(&parent_iter);
+      parent = node->parent();
     }
   } else {
     // Rebalancing not possible because this is the root node.
     // Create a new root node and set the current root node as the child of the
     // new root.
-    parent = new_internal_node(parent);
-    parent->init_child(parent->start(), root());
+    parent = new_internal_node(/*position=*/0, parent);
+    parent->set_generation(root()->generation());
+    parent->init_child(parent->start(), node);
     mutable_root() = parent;
     // If the former root was a leaf node, then it's now the rightmost node.
-    assert(!parent->start_child()->leaf() ||
-           parent->start_child() == rightmost_);
+    assert(parent->start_child()->is_internal() ||
+           parent->start_child() == rightmost());
   }
 
   // Split the node.
   node_type *split_node;
-  if (node->leaf()) {
-    split_node = new_leaf_node(parent);
+  if (node->is_leaf()) {
+    split_node = new_leaf_node(node->position() + 1, parent);
     node->split(insert_position, split_node, mutable_allocator());
-    if (rightmost_ == node) rightmost_ = split_node;
+    if (rightmost() == node) mutable_rightmost() = split_node;
   } else {
-    split_node = new_internal_node(parent);
+    split_node = new_internal_node(node->position() + 1, parent);
     node->split(insert_position, split_node, mutable_allocator());
   }
 
@@ -2371,55 +2728,57 @@
 template <typename P>
 void btree<P>::merge_nodes(node_type *left, node_type *right) {
   left->merge(right, mutable_allocator());
-  if (rightmost_ == right) rightmost_ = left;
+  if (rightmost() == right) mutable_rightmost() = left;
 }
 
 template <typename P>
 bool btree<P>::try_merge_or_rebalance(iterator *iter) {
-  node_type *parent = iter->node->parent();
-  if (iter->node->position() > parent->start()) {
+  node_type *parent = iter->node_->parent();
+  if (iter->node_->position() > parent->start()) {
     // Try merging with our left sibling.
-    node_type *left = parent->child(iter->node->position() - 1);
-    assert(left->max_count() == kNodeValues);
-    if (1 + left->count() + iter->node->count() <= kNodeValues) {
-      iter->position += 1 + left->count();
-      merge_nodes(left, iter->node);
-      iter->node = left;
+    node_type *left = parent->child(iter->node_->position() - 1);
+    assert(left->max_count() == kNodeSlots);
+    if (1U + left->count() + iter->node_->count() <= kNodeSlots) {
+      iter->position_ += 1 + left->count();
+      merge_nodes(left, iter->node_);
+      iter->node_ = left;
       return true;
     }
   }
-  if (iter->node->position() < parent->finish()) {
+  if (iter->node_->position() < parent->finish()) {
     // Try merging with our right sibling.
-    node_type *right = parent->child(iter->node->position() + 1);
-    assert(right->max_count() == kNodeValues);
-    if (1 + iter->node->count() + right->count() <= kNodeValues) {
-      merge_nodes(iter->node, right);
+    node_type *right = parent->child(iter->node_->position() + 1);
+    assert(right->max_count() == kNodeSlots);
+    if (1U + iter->node_->count() + right->count() <= kNodeSlots) {
+      merge_nodes(iter->node_, right);
       return true;
     }
     // Try rebalancing with our right sibling. We don't perform rebalancing if
-    // we deleted the first element from iter->node and the node is not
+    // we deleted the first element from iter->node_ and the node is not
     // empty. This is a small optimization for the common pattern of deleting
     // from the front of the tree.
     if (right->count() > kMinNodeValues &&
-        (iter->node->count() == 0 || iter->position > iter->node->start())) {
-      int to_move = (right->count() - iter->node->count()) / 2;
-      to_move = (std::min)(to_move, right->count() - 1);
-      iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+        (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) {
+      field_type to_move = (right->count() - iter->node_->count()) / 2;
+      to_move =
+          (std::min)(to_move, static_cast<field_type>(right->count() - 1));
+      iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator());
       return false;
     }
   }
-  if (iter->node->position() > parent->start()) {
+  if (iter->node_->position() > parent->start()) {
     // Try rebalancing with our left sibling. We don't perform rebalancing if
-    // we deleted the last element from iter->node and the node is not
+    // we deleted the last element from iter->node_ and the node is not
     // empty. This is a small optimization for the common pattern of deleting
     // from the back of the tree.
-    node_type *left = parent->child(iter->node->position() - 1);
+    node_type *left = parent->child(iter->node_->position() - 1);
     if (left->count() > kMinNodeValues &&
-        (iter->node->count() == 0 || iter->position < iter->node->finish())) {
-      int to_move = (left->count() - iter->node->count()) / 2;
-      to_move = (std::min)(to_move, left->count() - 1);
-      left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
-      iter->position += to_move;
+        (iter->node_->count() == 0 ||
+         iter->position_ < iter->node_->finish())) {
+      field_type to_move = (left->count() - iter->node_->count()) / 2;
+      to_move = (std::min)(to_move, static_cast<field_type>(left->count() - 1));
+      left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator());
+      iter->position_ += to_move;
       return false;
     }
   }
@@ -2433,9 +2792,9 @@
     return;
   }
   // Deleted the last item on the root node, shrink the height of the tree.
-  if (orig_root->leaf()) {
+  if (orig_root->is_leaf()) {
     assert(size() == 0);
-    mutable_root() = rightmost_ = EmptyNode();
+    mutable_root() = mutable_rightmost() = EmptyNode();
   } else {
     node_type *child = orig_root->start_child();
     child->make_root();
@@ -2447,53 +2806,94 @@
 template <typename P>
 template <typename IterType>
 inline IterType btree<P>::internal_last(IterType iter) {
-  assert(iter.node != nullptr);
-  while (iter.position == iter.node->finish()) {
-    iter.position = iter.node->position();
-    iter.node = iter.node->parent();
-    if (iter.node->leaf()) {
-      iter.node = nullptr;
+  assert(iter.node_ != nullptr);
+  while (iter.position_ == iter.node_->finish()) {
+    iter.position_ = iter.node_->position();
+    iter.node_ = iter.node_->parent();
+    if (iter.node_->is_leaf()) {
+      iter.node_ = nullptr;
       break;
     }
   }
+  iter.update_generation();
   return iter;
 }
 
 template <typename P>
 template <typename... Args>
-inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
+inline auto btree<P>::internal_emplace(iterator iter, Args &&...args)
     -> iterator {
-  if (!iter.node->leaf()) {
+  if (iter.node_->is_internal()) {
     // We can't insert on an internal node. Instead, we'll insert after the
     // previous value which is guaranteed to be on a leaf node.
     --iter;
-    ++iter.position;
+    ++iter.position_;
   }
-  const field_type max_count = iter.node->max_count();
+  const field_type max_count = iter.node_->max_count();
   allocator_type *alloc = mutable_allocator();
-  if (iter.node->count() == max_count) {
+
+  const auto transfer_and_delete = [&](node_type *old_node,
+                                       node_type *new_node) {
+    new_node->transfer_n(old_node->count(), new_node->start(),
+                         old_node->start(), old_node, alloc);
+    new_node->set_finish(old_node->finish());
+    old_node->set_finish(old_node->start());
+    new_node->set_generation(old_node->generation());
+    node_type::clear_and_delete(old_node, alloc);
+  };
+  const auto replace_leaf_root_node = [&](field_type new_node_size) {
+    assert(iter.node_ == root());
+    node_type *old_root = iter.node_;
+    node_type *new_root = iter.node_ = new_leaf_root_node(new_node_size);
+    transfer_and_delete(old_root, new_root);
+    mutable_root() = mutable_rightmost() = new_root;
+  };
+
+  bool replaced_node = false;
+  if (iter.node_->count() == max_count) {
     // Make room in the leaf for the new item.
-    if (max_count < kNodeValues) {
+    if (max_count < kNodeSlots) {
       // Insertion into the root where the root is smaller than the full node
       // size. Simply grow the size of the root node.
-      assert(iter.node == root());
-      iter.node =
-          new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
-      // Transfer the values from the old root to the new root.
-      node_type *old_root = root();
-      node_type *new_root = iter.node;
-      new_root->transfer_n(old_root->count(), new_root->start(),
-                           old_root->start(), old_root, alloc);
-      new_root->set_finish(old_root->finish());
-      old_root->set_finish(old_root->start());
-      node_type::clear_and_delete(old_root, alloc);
-      mutable_root() = rightmost_ = new_root;
+      replace_leaf_root_node(static_cast<field_type>(
+          (std::min)(static_cast<int>(kNodeSlots), 2 * max_count)));
+      replaced_node = true;
     } else {
       rebalance_or_split(&iter);
     }
   }
-  iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
+  (void)replaced_node;
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  if (!replaced_node) {
+    assert(iter.node_->is_leaf());
+    if (iter.node_->is_root()) {
+      replace_leaf_root_node(max_count);
+    } else {
+      node_type *old_node = iter.node_;
+      const bool was_rightmost = rightmost() == old_node;
+      const bool was_leftmost = leftmost() == old_node;
+      node_type *parent = old_node->parent();
+      const field_type position = old_node->position();
+      node_type *new_node = iter.node_ = new_leaf_node(position, parent);
+      parent->set_child_noupdate_position(position, new_node);
+      transfer_and_delete(old_node, new_node);
+      if (was_rightmost) mutable_rightmost() = new_node;
+      // The leftmost node is stored as the parent of the root node.
+      if (was_leftmost) root()->set_parent(new_node);
+    }
+  }
+#endif
+  iter.node_->emplace_value(static_cast<field_type>(iter.position_), alloc,
+                            std::forward<Args>(args)...);
+  assert(
+      iter.node_->is_ordered_correctly(static_cast<field_type>(iter.position_),
+                                       original_key_compare(key_comp())) &&
+      "If this assert fails, then either (1) the comparator may violate "
+      "transitivity, i.e. comp(a,b) && comp(b,c) -> comp(a,c) (see "
+      "https://en.cppreference.com/w/cpp/named_req/Compare), or (2) a "
+      "key may have been mutated after it was inserted into the tree.");
   ++size_;
+  iter.update_generation();
   return iter;
 }
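The new assertion documents two common misuse patterns: a comparator that is not a strict weak ordering, and keys mutated in place after insertion. An illustrative (and deliberately broken) comparator of the first kind; this is a hypothetical example, not anything shipped in Abseil:

// Deliberately invalid: with a tolerance, "equivalence" is not transitive
// (0.0 ~ 0.4 and 0.4 ~ 0.8, yet 0.0 orders before 0.8), so using this with a
// btree is undefined behavior. In debug builds the assertion above can catch
// it at the point of insertion.
struct ApproxLess {
  bool operator()(double a, double b) const { return a + 0.5 < b; }
};
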
 
@@ -2501,61 +2901,51 @@
 template <typename K>
 inline auto btree<P>::internal_locate(const K &key) const
     -> SearchResult<iterator, is_key_compare_to::value> {
-  return internal_locate_impl(key, is_key_compare_to());
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
-    const K &key, std::false_type /* IsCompareTo */) const
-    -> SearchResult<iterator, false> {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    iter.position = iter.node->lower_bound(key, key_comp()).value;
-    // NOTE: we don't need to walk all the way down the tree if the keys are
-    // equal, but determining equality would require doing an extra comparison
-    // on each node on the way down, and we will need to go all the way to the
-    // leaf node in the expected case.
-    if (iter.node->leaf()) {
-      break;
-    }
-    iter.node = iter.node->child(iter.position);
-  }
-  return {iter};
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
-    const K &key, std::true_type /* IsCompareTo */) const
-    -> SearchResult<iterator, true> {
-  iterator iter(const_cast<node_type *>(root()));
-  for (;;) {
-    SearchResult<int, true> res = iter.node->lower_bound(key, key_comp());
-    iter.position = res.value;
-    if (res.match == MatchKind::kEq) {
+    SearchResult<size_type, is_key_compare_to::value> res =
+        iter.node_->lower_bound(key, key_comp());
+    iter.position_ = static_cast<int>(res.value);
+    if (res.IsEq()) {
       return {iter, MatchKind::kEq};
     }
-    if (iter.node->leaf()) {
+    // Note: in the non-key-compare-to case, we don't need to walk all the way
+    // down the tree if the keys are equal, but determining equality would
+    // require doing an extra comparison on each node on the way down, and we
+    // will need to go all the way to the leaf node in the expected case.
+    if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node = iter.node->child(iter.position);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
+  // Note: in the non-key-compare-to case, the key may actually be equivalent
+  // here (and the MatchKind::kNe is ignored).
   return {iter, MatchKind::kNe};
 }
 
 template <typename P>
 template <typename K>
-auto btree<P>::internal_lower_bound(const K &key) const -> iterator {
+auto btree<P>::internal_lower_bound(const K &key) const
+    -> SearchResult<iterator, is_key_compare_to::value> {
+  if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+    SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
+    ret.value = internal_last(ret.value);
+    return ret;
+  }
   iterator iter(const_cast<node_type *>(root()));
+  SearchResult<size_type, is_key_compare_to::value> res;
+  bool seen_eq = false;
   for (;;) {
-    iter.position = iter.node->lower_bound(key, key_comp()).value;
-    if (iter.node->leaf()) {
+    res = iter.node_->lower_bound(key, key_comp());
+    iter.position_ = static_cast<int>(res.value);
+    if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node = iter.node->child(iter.position);
+    seen_eq = seen_eq || res.IsEq();
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
-  return internal_last(iter);
+  if (res.IsEq()) return {iter, MatchKind::kEq};
+  return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
 }
 
 template <typename P>
@@ -2563,11 +2953,11 @@
 auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    iter.position = iter.node->upper_bound(key, key_comp());
-    if (iter.node->leaf()) {
+    iter.position_ = static_cast<int>(iter.node_->upper_bound(key, key_comp()));
+    if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node = iter.node->child(iter.position);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   return internal_last(iter);
 }
@@ -2575,14 +2965,14 @@
 template <typename P>
 template <typename K>
 auto btree<P>::internal_find(const K &key) const -> iterator {
-  auto res = internal_locate(key);
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
   if (res.HasMatch()) {
     if (res.IsEq()) {
       return res.value;
     }
   } else {
     const iterator iter = internal_last(res.value);
-    if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+    if (iter.node_ != nullptr && !compare_keys(key, iter.key())) {
       return iter;
     }
   }
@@ -2590,8 +2980,8 @@
 }
 
 template <typename P>
-int btree<P>::internal_verify(const node_type *node, const key_type *lo,
-                              const key_type *hi) const {
+typename btree<P>::size_type btree<P>::internal_verify(
+    const node_type *node, const key_type *lo, const key_type *hi) const {
   assert(node->count() > 0);
   assert(node->count() <= node->max_count());
   if (lo) {
@@ -2603,9 +2993,9 @@
   for (int i = node->start() + 1; i < node->finish(); ++i) {
     assert(!compare_keys(node->key(i), node->key(i - 1)));
   }
-  int count = node->count();
-  if (!node->leaf()) {
-    for (int i = node->start(); i <= node->finish(); ++i) {
+  size_type count = node->count();
+  if (node->is_internal()) {
+    for (field_type i = node->start(); i <= node->finish(); ++i) {
       assert(node->child(i) != nullptr);
       assert(node->child(i)->parent() == node);
       assert(node->child(i)->position() == i);
@@ -2617,6 +3007,50 @@
   return count;
 }
 
+struct btree_access {
+  template <typename BtreeContainer, typename Pred>
+  static auto erase_if(BtreeContainer &container, Pred pred) ->
+      typename BtreeContainer::size_type {
+    const auto initial_size = container.size();
+    auto &tree = container.tree_;
+    auto *alloc = tree.mutable_allocator();
+    for (auto it = container.begin(); it != container.end();) {
+      if (!pred(*it)) {
+        ++it;
+        continue;
+      }
+      auto *node = it.node_;
+      if (node->is_internal()) {
+        // Handle internal nodes normally.
+        it = container.erase(it);
+        continue;
+      }
+      // If this is a leaf node, then we do all the erases from this node
+      // at once before doing rebalancing.
+
+      // The current position to transfer slots to.
+      int to_pos = it.position_;
+      node->value_destroy(it.position_, alloc);
+      while (++it.position_ < node->finish()) {
+        it.update_generation();
+        if (pred(*it)) {
+          node->value_destroy(it.position_, alloc);
+        } else {
+          node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc);
+        }
+      }
+      const int num_deleted = node->finish() - to_pos;
+      tree.size_ -= num_deleted;
+      node->set_finish(to_pos);
+      it.position_ = to_pos;
+      it = tree.rebalance_after_delete(it);
+    }
+    return initial_size - container.size();
+  }
+};
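btree_access::erase_if backs the container-level absl::erase_if overloads shipped alongside these headers (which, in this revision, appear to report the number of erased elements): predicate matches within one leaf are destroyed and compacted in place, and rebalancing runs once per leaf rather than once per element. A short usage sketch:

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  // Removes the even elements in bulk; s is left holding {1, 3, 5}.
  absl::erase_if(s, [](int v) { return v % 2 == 0; });
}
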
+
+#undef ABSL_BTREE_ENABLE_GENERATIONS
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/container/internal/btree_container.h b/abseil-cpp/absl/container/internal/btree_container.h
index 137614f..a68ce44 100644
--- a/abseil-cpp/absl/container/internal/btree_container.h
+++ b/abseil-cpp/absl/container/internal/btree_container.h
@@ -20,9 +20,11 @@
 #include <iterator>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/internal/throw_delegate.h"
 #include "absl/container/internal/btree.h"  // IWYU pragma: export
 #include "absl/container/internal/common.h"
+#include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -42,15 +44,15 @@
   // transparent case.
   template <class K>
   using key_arg =
-      typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
-          template type<K, typename Tree::key_type>;
+      typename KeyArg<params_type::kIsKeyCompareTransparent>::template type<
+          K, typename Tree::key_type>;
 
  public:
   using key_type = typename Tree::key_type;
   using value_type = typename Tree::value_type;
   using size_type = typename Tree::size_type;
   using difference_type = typename Tree::difference_type;
-  using key_compare = typename Tree::key_compare;
+  using key_compare = typename Tree::original_key_compare;
   using value_compare = typename Tree::value_compare;
   using allocator_type = typename Tree::allocator_type;
   using reference = typename Tree::reference;
@@ -63,38 +65,80 @@
   using const_reverse_iterator = typename Tree::const_reverse_iterator;
   using node_type = typename Tree::node_handle_type;
 
+  struct extract_and_get_next_return_type {
+    node_type node;
+    iterator next;
+  };
+
   // Constructors/assignments.
   btree_container() : tree_(key_compare(), allocator_type()) {}
   explicit btree_container(const key_compare &comp,
                            const allocator_type &alloc = allocator_type())
       : tree_(comp, alloc) {}
-  btree_container(const btree_container &other) = default;
-  btree_container(btree_container &&other) noexcept = default;
+  explicit btree_container(const allocator_type &alloc)
+      : tree_(key_compare(), alloc) {}
+
+  btree_container(const btree_container &other)
+      : btree_container(other, absl::allocator_traits<allocator_type>::
+                                   select_on_container_copy_construction(
+                                       other.get_allocator())) {}
+  btree_container(const btree_container &other, const allocator_type &alloc)
+      : tree_(other.tree_, alloc) {}
+
+  btree_container(btree_container &&other) noexcept(
+      std::is_nothrow_move_constructible<Tree>::value) = default;
+  btree_container(btree_container &&other, const allocator_type &alloc)
+      : tree_(std::move(other.tree_), alloc) {}
+
   btree_container &operator=(const btree_container &other) = default;
   btree_container &operator=(btree_container &&other) noexcept(
       std::is_nothrow_move_assignable<Tree>::value) = default;
 
   // Iterator routines.
-  iterator begin() { return tree_.begin(); }
-  const_iterator begin() const { return tree_.begin(); }
-  const_iterator cbegin() const { return tree_.begin(); }
-  iterator end() { return tree_.end(); }
-  const_iterator end() const { return tree_.end(); }
-  const_iterator cend() const { return tree_.end(); }
-  reverse_iterator rbegin() { return tree_.rbegin(); }
-  const_reverse_iterator rbegin() const { return tree_.rbegin(); }
-  const_reverse_iterator crbegin() const { return tree_.rbegin(); }
-  reverse_iterator rend() { return tree_.rend(); }
-  const_reverse_iterator rend() const { return tree_.rend(); }
-  const_reverse_iterator crend() const { return tree_.rend(); }
+  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.begin(); }
+  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.begin();
+  }
+  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.begin();
+  }
+  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.end(); }
+  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.end();
+  }
+  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.end();
+  }
+  reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.rbegin();
+  }
+  const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.rbegin();
+  }
+  const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.rbegin();
+  }
+  reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.rend(); }
+  const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.rend();
+  }
+  const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.rend();
+  }
 
   // Lookup routines.
   template <typename K = key_type>
-  iterator find(const key_arg<K> &key) {
+  size_type count(const key_arg<K> &key) const {
+    auto equal_range = this->equal_range(key);
+    return equal_range.second - equal_range.first;
+  }
+  template <typename K = key_type>
+  iterator find(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.find(key);
   }
   template <typename K = key_type>
-  const_iterator find(const key_arg<K> &key) const {
+  const_iterator find(const key_arg<K> &key) const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.find(key);
   }
   template <typename K = key_type>
@@ -102,28 +146,31 @@
     return find(key) != end();
   }
   template <typename K = key_type>
-  iterator lower_bound(const key_arg<K> &key) {
+  iterator lower_bound(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.lower_bound(key);
   }
   template <typename K = key_type>
-  const_iterator lower_bound(const key_arg<K> &key) const {
+  const_iterator lower_bound(const key_arg<K> &key) const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.lower_bound(key);
   }
   template <typename K = key_type>
-  iterator upper_bound(const key_arg<K> &key) {
+  iterator upper_bound(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.upper_bound(key);
   }
   template <typename K = key_type>
-  const_iterator upper_bound(const key_arg<K> &key) const {
+  const_iterator upper_bound(const key_arg<K> &key) const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.upper_bound(key);
   }
   template <typename K = key_type>
-  std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
+  std::pair<iterator, iterator> equal_range(const key_arg<K> &key)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.equal_range(key);
   }
   template <typename K = key_type>
   std::pair<const_iterator, const_iterator> equal_range(
-      const key_arg<K> &key) const {
+      const key_arg<K> &key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.equal_range(key);
   }
 
@@ -133,17 +180,37 @@
   // Erase the specified iterator from the btree. The iterator must be valid
   // (i.e. not equal to end()).  Return an iterator pointing to the node after
   // the one that was erased (or end() if none exists).
-  iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
-  iterator erase(iterator iter) { return tree_.erase(iter); }
-  iterator erase(const_iterator first, const_iterator last) {
+  iterator erase(const_iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.erase(iterator(iter));
+  }
+  iterator erase(iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return tree_.erase(iter);
+  }
+  iterator erase(const_iterator first,
+                 const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return tree_.erase_range(iterator(first), iterator(last)).second;
   }
+  template <typename K = key_type>
+  size_type erase(const key_arg<K> &key) {
+    auto equal_range = this->equal_range(key);
+    return tree_.erase_range(equal_range.first, equal_range.second).first;
+  }
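The key-based erase() now lives in the common base and is expressed via equal_range(), so unique and multi containers share one implementation and heterogeneous keys work wherever the comparator is transparent. A sketch (the default std::string comparator is adapted to be transparent, so a string_view lookup works):

#include <string>
#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"

int main() {
  absl::btree_map<std::string, int> m = {{"a", 1}, {"b", 2}};
  absl::string_view key = "a";
  // No temporary std::string is built for the lookup; the return value is the
  // number of elements removed (0 or 1 for a unique container).
  const auto n = m.erase(key);
  (void)n;
}
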
 
   // Extract routines.
+  extract_and_get_next_return_type extract_and_get_next(const_iterator position)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    // Use Construct instead of Transfer because the rebalancing code will
+    // destroy the slot later.
+    // Note: we rely on erase() taking place after Construct().
+    return {CommonAccess::Construct<node_type>(get_allocator(),
+                                               iterator(position).slot()),
+            erase(position)};
+  }
   node_type extract(iterator position) {
-    // Use Move instead of Transfer, because the rebalancing code expects to
-    // have a valid object to scribble metadata bits on top of.
-    auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+    // Use Construct instead of Transfer because the rebalancing code will
+    // destroy the slot later.
+    auto node =
+        CommonAccess::Construct<node_type>(get_allocator(), position.slot());
     erase(position);
     return node;
   }
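extract() now constructs the node handle directly from the slot and lets the subsequent erase()/rebalance path destroy the source slot, instead of moving over it. From the caller's perspective it is the usual C++17 node-handle workflow; a brief sketch:

#include <string>
#include <utility>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> a = {{1, "one"}, {2, "two"}};
  absl::btree_map<int, std::string> b;
  // The element is never copied: ownership moves via the node handle.
  auto nh = a.extract(a.find(1));
  if (nh) b.insert(std::move(nh));
}
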
@@ -151,9 +218,8 @@
     return extract(iterator(position));
   }
 
- public:
   // Utility routines.
-  void clear() { tree_.clear(); }
+  ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
   void swap(btree_container &other) { tree_.swap(other.tree_); }
   void verify() const { tree_.verify(); }
 
@@ -191,7 +257,7 @@
   allocator_type get_allocator() const { return tree_.get_allocator(); }
 
   // The key comparator used by the btree.
-  key_compare key_comp() const { return tree_.key_comp(); }
+  key_compare key_comp() const { return key_compare(tree_.key_comp()); }
   value_compare value_comp() const { return tree_.value_comp(); }
 
   // Support absl::Hash.
@@ -204,6 +270,7 @@
   }
 
  protected:
+  friend struct btree_access;
   Tree tree_;
 };
 
@@ -224,7 +291,7 @@
   using key_type = typename Tree::key_type;
   using value_type = typename Tree::value_type;
   using size_type = typename Tree::size_type;
-  using key_compare = typename Tree::key_compare;
+  using key_compare = typename Tree::original_key_compare;
   using allocator_type = typename Tree::allocator_type;
   using iterator = typename Tree::iterator;
   using const_iterator = typename Tree::const_iterator;
@@ -235,7 +302,7 @@
   using super_type::super_type;
   btree_set_container() {}
 
-  // Range constructor.
+  // Range constructors.
   template <class InputIterator>
   btree_set_container(InputIterator b, InputIterator e,
                       const key_compare &comp = key_compare(),
@@ -243,46 +310,59 @@
       : super_type(comp, alloc) {
     insert(b, e);
   }
+  template <class InputIterator>
+  btree_set_container(InputIterator b, InputIterator e,
+                      const allocator_type &alloc)
+      : btree_set_container(b, e, key_compare(), alloc) {}
 
-  // Initializer list constructor.
+  // Initializer list constructors.
   btree_set_container(std::initializer_list<init_type> init,
                       const key_compare &comp = key_compare(),
                       const allocator_type &alloc = allocator_type())
       : btree_set_container(init.begin(), init.end(), comp, alloc) {}
-
-  // Lookup routines.
-  template <typename K = key_type>
-  size_type count(const key_arg<K> &key) const {
-    return this->tree_.count_unique(key);
-  }
+  btree_set_container(std::initializer_list<init_type> init,
+                      const allocator_type &alloc)
+      : btree_set_container(init.begin(), init.end(), alloc) {}
 
   // Insertion routines.
-  std::pair<iterator, bool> insert(const value_type &v) {
+  std::pair<iterator, bool> insert(const value_type &v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_.insert_unique(params_type::key(v), v);
   }
-  std::pair<iterator, bool> insert(value_type &&v) {
+  std::pair<iterator, bool> insert(value_type &&v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_.insert_unique(params_type::key(v), std::move(v));
   }
   template <typename... Args>
-  std::pair<iterator, bool> emplace(Args &&... args) {
-    init_type v(std::forward<Args>(args)...);
-    return this->tree_.insert_unique(params_type::key(v), std::move(v));
+  std::pair<iterator, bool> emplace(Args &&...args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    // Use a node handle to manage a temp slot.
+    auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+                                                   std::forward<Args>(args)...);
+    auto *slot = CommonAccess::GetSlot(node);
+    return this->tree_.insert_unique(params_type::key(slot), slot);
   }
-  iterator insert(const_iterator hint, const value_type &v) {
+  iterator insert(const_iterator hint,
+                  const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_
         .insert_hint_unique(iterator(hint), params_type::key(v), v)
         .first;
   }
-  iterator insert(const_iterator hint, value_type &&v) {
+  iterator insert(const_iterator hint,
+                  value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_
         .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
         .first;
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator hint, Args &&... args) {
-    init_type v(std::forward<Args>(args)...);
+  iterator emplace_hint(const_iterator hint,
+                        Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    // Use a node handle to manage a temp slot.
+    auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+                                                   std::forward<Args>(args)...);
+    auto *slot = CommonAccess::GetSlot(node);
     return this->tree_
-        .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+        .insert_hint_unique(iterator(hint), params_type::key(slot), slot)
         .first;
   }
   template <typename InputIterator>
@@ -292,7 +372,7 @@
   void insert(std::initializer_list<init_type> init) {
     this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
   }
-  insert_return_type insert(node_type &&node) {
+  insert_return_type insert(node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (!node) return {this->end(), false, node_type()};
     std::pair<iterator, bool> res =
         this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)),
@@ -304,7 +384,8 @@
       return {res.first, false, std::move(node)};
     }
   }
-  iterator insert(const_iterator hint, node_type &&node) {
+  iterator insert(const_iterator hint,
+                  node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (!node) return this->end();
     std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
         iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
@@ -313,20 +394,13 @@
     return res.first;
   }
 
-  // Deletion routines.
-  // TODO(ezb): we should support heterogeneous comparators that have different
-  // behavior for K!=key_type.
-  template <typename K = key_type>
-  size_type erase(const key_arg<K> &key) {
-    return this->tree_.erase_unique(key);
-  }
-  using super_type::erase;
-
   // Node extraction routines.
   template <typename K = key_type>
   node_type extract(const key_arg<K> &key) {
-    auto it = this->find(key);
-    return it == this->end() ? node_type() : extract(it);
+    const std::pair<iterator, bool> lower_and_equal =
+        this->tree_.lower_bound_equal(key);
+    return lower_and_equal.second ? extract(lower_and_equal.first)
+                                  : node_type();
   }
   using super_type::extract;
 
@@ -344,7 +418,7 @@
           int> = 0>
   void merge(btree_container<T> &src) {  // NOLINT
     for (auto src_it = src.begin(); src_it != src.end();) {
-      if (insert(std::move(*src_it)).second) {
+      if (insert(std::move(params_type::element(src_it.slot()))).second) {
         src_it = src.erase(src_it);
       } else {
         ++src_it;
@@ -371,6 +445,7 @@
 class btree_map_container : public btree_set_container<Tree> {
   using super_type = btree_set_container<Tree>;
   using params_type = typename Tree::params_type;
+  friend class BtreeNodePeer;
 
  private:
   template <class K>
@@ -380,7 +455,7 @@
   using key_type = typename Tree::key_type;
   using mapped_type = typename params_type::mapped_type;
   using value_type = typename Tree::value_type;
-  using key_compare = typename Tree::key_compare;
+  using key_compare = typename Tree::original_key_compare;
   using allocator_type = typename Tree::allocator_type;
   using iterator = typename Tree::iterator;
   using const_iterator = typename Tree::const_iterator;
@@ -393,37 +468,43 @@
   // Note: the nullptr template arguments and extra `const M&` overloads allow
   // for supporting bitfield arguments.
   template <typename K = key_type, class M>
-  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
-                                             const M &obj) {
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, const M &obj)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(k, obj);
   }
   template <typename K = key_type, class M, K * = nullptr>
-  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(std::forward<K>(k), obj);
   }
   template <typename K = key_type, class M, M * = nullptr>
-  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(k, std::forward<M>(obj));
   }
   template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
-  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
   }
   template <typename K = key_type, class M>
   iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
-                            const M &obj) {
+                            const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_hint_impl(hint, k, obj);
   }
   template <typename K = key_type, class M, K * = nullptr>
-  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k,
+                            const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
   }
   template <typename K = key_type, class M, M * = nullptr>
-  iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+  iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
+                            M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
   }
   template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
-  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k,
+                            M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_hint_impl(hint, std::forward<K>(k),
                                       std::forward<M>(obj));
   }
@@ -431,44 +512,48 @@
   template <typename K = key_type, typename... Args,
             typename absl::enable_if_t<
                 !std::is_convertible<K, const_iterator>::value, int> = 0>
-  std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+  std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&...args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace_impl(k, std::forward<Args>(args)...);
   }
   template <typename K = key_type, typename... Args,
             typename absl::enable_if_t<
                 !std::is_convertible<K, const_iterator>::value, int> = 0>
-  std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+  std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&...args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
   }
   template <typename K = key_type, typename... Args>
   iterator try_emplace(const_iterator hint, const key_arg<K> &k,
-                       Args &&... args) {
+                       Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
   }
   template <typename K = key_type, typename... Args>
-  iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+  iterator try_emplace(const_iterator hint, key_arg<K> &&k,
+                       Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace_hint_impl(hint, std::forward<K>(k),
                                  std::forward<Args>(args)...);
   }
 
   template <typename K = key_type>
-  mapped_type &operator[](const key_arg<K> &k) {
+  mapped_type &operator[](const key_arg<K> &k) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace(k).first->second;
   }
   template <typename K = key_type>
-  mapped_type &operator[](key_arg<K> &&k) {
+  mapped_type &operator[](key_arg<K> &&k) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace(std::forward<K>(k)).first->second;
   }
 
   template <typename K = key_type>
-  mapped_type &at(const key_arg<K> &key) {
+  mapped_type &at(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = this->find(key);
     if (it == this->end())
       base_internal::ThrowStdOutOfRange("absl::btree_map::at");
     return it->second;
   }
   template <typename K = key_type>
-  const mapped_type &at(const key_arg<K> &key) const {
+  const mapped_type &at(const key_arg<K> &key) const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = this->find(key);
     if (it == this->end())
       base_internal::ThrowStdOutOfRange("absl::btree_map::at");
@@ -517,6 +602,7 @@
   using params_type = typename Tree::params_type;
   using init_type = typename params_type::init_type;
   using is_key_compare_to = typename params_type::is_key_compare_to;
+  friend class BtreeNodePeer;
 
   template <class K>
   using key_arg = typename super_type::template key_arg<K>;
@@ -525,7 +611,7 @@
   using key_type = typename Tree::key_type;
   using value_type = typename Tree::value_type;
   using size_type = typename Tree::size_type;
-  using key_compare = typename Tree::key_compare;
+  using key_compare = typename Tree::original_key_compare;
   using allocator_type = typename Tree::allocator_type;
   using iterator = typename Tree::iterator;
   using const_iterator = typename Tree::const_iterator;
@@ -535,7 +621,7 @@
   using super_type::super_type;
   btree_multiset_container() {}
 
-  // Range constructor.
+  // Range constructors.
   template <class InputIterator>
   btree_multiset_container(InputIterator b, InputIterator e,
                            const key_compare &comp = key_compare(),
@@ -543,28 +629,33 @@
       : super_type(comp, alloc) {
     insert(b, e);
   }
+  template <class InputIterator>
+  btree_multiset_container(InputIterator b, InputIterator e,
+                           const allocator_type &alloc)
+      : btree_multiset_container(b, e, key_compare(), alloc) {}
 
-  // Initializer list constructor.
+  // Initializer list constructors.
   btree_multiset_container(std::initializer_list<init_type> init,
                            const key_compare &comp = key_compare(),
                            const allocator_type &alloc = allocator_type())
       : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
-
-  // Lookup routines.
-  template <typename K = key_type>
-  size_type count(const key_arg<K> &key) const {
-    return this->tree_.count_multi(key);
-  }
+  btree_multiset_container(std::initializer_list<init_type> init,
+                           const allocator_type &alloc)
+      : btree_multiset_container(init.begin(), init.end(), alloc) {}
 
   // Insertion routines.
-  iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
-  iterator insert(value_type &&v) {
+  iterator insert(const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return this->tree_.insert_multi(v);
+  }
+  iterator insert(value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_.insert_multi(std::move(v));
   }
-  iterator insert(const_iterator hint, const value_type &v) {
+  iterator insert(const_iterator hint,
+                  const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_.insert_hint_multi(iterator(hint), v);
   }
-  iterator insert(const_iterator hint, value_type &&v) {
+  iterator insert(const_iterator hint,
+                  value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
   }
   template <typename InputIterator>
@@ -575,15 +666,22 @@
     this->tree_.insert_iterator_multi(init.begin(), init.end());
   }
   template <typename... Args>
-  iterator emplace(Args &&... args) {
-    return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+  iterator emplace(Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    // Use a node handle to manage a temp slot.
+    auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+                                                   std::forward<Args>(args)...);
+    return this->tree_.insert_multi(CommonAccess::GetSlot(node));
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator hint, Args &&... args) {
-    return this->tree_.insert_hint_multi(
-        iterator(hint), init_type(std::forward<Args>(args)...));
+  iterator emplace_hint(const_iterator hint,
+                        Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    // Use a node handle to manage a temp slot.
+    auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+                                                   std::forward<Args>(args)...);
+    return this->tree_.insert_hint_multi(iterator(hint),
+                                         CommonAccess::GetSlot(node));
   }
-  iterator insert(node_type &&node) {
+  iterator insert(node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (!node) return this->end();
     iterator res =
         this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)),
@@ -591,7 +689,8 @@
     CommonAccess::Destroy(&node);
     return res;
   }
-  iterator insert(const_iterator hint, node_type &&node) {
+  iterator insert(const_iterator hint,
+                  node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (!node) return this->end();
     iterator res = this->tree_.insert_hint_multi(
         iterator(hint),
@@ -600,18 +699,13 @@
     return res;
   }
 
-  // Deletion routines.
-  template <typename K = key_type>
-  size_type erase(const key_arg<K> &key) {
-    return this->tree_.erase_multi(key);
-  }
-  using super_type::erase;
-
   // Node extraction routines.
   template <typename K = key_type>
   node_type extract(const key_arg<K> &key) {
-    auto it = this->find(key);
-    return it == this->end() ? node_type() : extract(it);
+    const std::pair<iterator, bool> lower_and_equal =
+        this->tree_.lower_bound_equal(key);
+    return lower_and_equal.second ? extract(lower_and_equal.first)
+                                  : node_type();
   }
   using super_type::extract;
 
@@ -627,8 +721,9 @@
                            typename T::params_type::is_map_container>>::value,
           int> = 0>
   void merge(btree_container<T> &src) {  // NOLINT
-    insert(std::make_move_iterator(src.begin()),
-           std::make_move_iterator(src.end()));
+    for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
+      insert(std::move(params_type::element(src_it.slot())));
+    }
     src.clear();
   }
 
@@ -651,6 +746,7 @@
 class btree_multimap_container : public btree_multiset_container<Tree> {
   using super_type = btree_multiset_container<Tree>;
   using params_type = typename Tree::params_type;
+  friend class BtreeNodePeer;
 
  public:
   using mapped_type = typename params_type::mapped_type;
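
For context, the node-handle extract/insert routines above let callers move an element between containers without copying it; a minimal sketch of the resulting usage (container choice and key are illustrative, not taken from the patch):

#include <string>
#include <utility>

#include "absl/container/btree_map.h"

// Illustrative only: move one entry between maps without copying the element.
void MoveOneEntry(absl::btree_map<std::string, int>& from,
                  absl::btree_map<std::string, int>& to) {
  // extract() detaches the element as a node handle; the value is not copied.
  auto node = from.extract("key");
  if (node) {
    // insert(node_type&&) re-attaches the same slot into the destination.
    to.insert(std::move(node));
  }
}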
diff --git a/abseil-cpp/absl/container/internal/common.h b/abseil-cpp/absl/container/internal/common.h
index 030e9d4..9239bb4 100644
--- a/abseil-cpp/absl/container/internal/common.h
+++ b/abseil-cpp/absl/container/internal/common.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
-#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#ifndef ABSL_CONTAINER_INTERNAL_COMMON_H_
+#define ABSL_CONTAINER_INTERNAL_COMMON_H_
 
 #include <cassert>
 #include <type_traits>
@@ -84,10 +84,11 @@
     PolicyTraits::transfer(alloc(), slot(), s);
   }
 
-  struct move_tag_t {};
-  node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+  struct construct_tag_t {};
+  template <typename... Args>
+  node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args)
       : alloc_(a) {
-    PolicyTraits::construct(alloc(), slot(), s);
+    PolicyTraits::construct(alloc(), slot(), std::forward<Args>(args)...);
   }
 
   void destroy() {
@@ -186,8 +187,8 @@
   }
 
   template <typename T, typename... Args>
-  static T Move(Args&&... args) {
-    return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+  static T Construct(Args&&... args) {
+    return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
   }
 };
 
@@ -203,4 +204,4 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#endif  // ABSL_CONTAINER_INTERNAL_COMMON_H_
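
A hypothetical standalone mirror of the construct_tag_t change above, showing the in-place, argument-forwarding construction it enables (the names below are illustrative, not the real absl internals):

#include <memory>
#include <utility>

// Hypothetical sketch: instead of only transferring from an existing slot, a
// handle-like wrapper can now build its element in place from arbitrary
// constructor arguments.
template <class T, class Alloc, class... Args>
T* ConstructInSlot(Alloc& alloc, void* slot_storage, Args&&... args) {
  T* slot = static_cast<T*>(slot_storage);
  std::allocator_traits<Alloc>::construct(alloc, slot,
                                          std::forward<Args>(args)...);
  return slot;
}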
diff --git a/abseil-cpp/absl/container/internal/common_policy_traits.h b/abseil-cpp/absl/container/internal/common_policy_traits.h
new file mode 100644
index 0000000..3558a54
--- /dev/null
+++ b/abseil-cpp/absl/container/internal/common_policy_traits.h
@@ -0,0 +1,132 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
+#define ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
+
+#include <cstddef>
+#include <cstring>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Defines how slots are initialized/destroyed/moved.
+template <class Policy, class = void>
+struct common_policy_traits {
+  // The actual object stored in the container.
+  using slot_type = typename Policy::slot_type;
+  using reference = decltype(Policy::element(std::declval<slot_type*>()));
+  using value_type = typename std::remove_reference<reference>::type;
+
+  // PRECONDITION: `slot` is UNINITIALIZED
+  // POSTCONDITION: `slot` is INITIALIZED
+  template <class Alloc, class... Args>
+  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+    Policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  // PRECONDITION: `slot` is INITIALIZED
+  // POSTCONDITION: `slot` is UNINITIALIZED
+  template <class Alloc>
+  static void destroy(Alloc* alloc, slot_type* slot) {
+    Policy::destroy(alloc, slot);
+  }
+
+  // Transfers `old_slot` to `new_slot`. Any memory the allocator allocated
+  // for `old_slot` may be handed over to `new_slot` instead of being copied.
+  //
+  // OPTIONAL: defaults to:
+  //
+  //     clone(new_slot, std::move(*old_slot));
+  //     destroy(old_slot);
+  //
+  // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+  // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+  //                UNINITIALIZED
+  template <class Alloc>
+  static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+    transfer_impl(alloc, new_slot, old_slot, Rank0{});
+  }
+
+  // PRECONDITION: `slot` is INITIALIZED
+  // POSTCONDITION: `slot` is INITIALIZED
+  // Note: we use remove_const_t so that the two overloads have different args
+  // in the case of sets with explicitly const value_types.
+  template <class P = Policy>
+  static auto element(absl::remove_const_t<slot_type>* slot)
+      -> decltype(P::element(slot)) {
+    return P::element(slot);
+  }
+  template <class P = Policy>
+  static auto element(const slot_type* slot) -> decltype(P::element(slot)) {
+    return P::element(slot);
+  }
+
+  static constexpr bool transfer_uses_memcpy() {
+    return std::is_same<decltype(transfer_impl<std::allocator<char>>(
+                            nullptr, nullptr, nullptr, Rank0{})),
+                        std::true_type>::value;
+  }
+
+ private:
+  // Tag types used to rank the overloads below for overload resolution;
+  // Rank0 is preferred.
+  struct Rank2 {};
+  struct Rank1 : Rank2 {};
+  struct Rank0 : Rank1 {};
+
+  // Use auto -> decltype as an enabler.
+  template <class Alloc, class P = Policy>
+  static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
+                            slot_type* old_slot, Rank0)
+      -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
+    P::transfer(alloc, new_slot, old_slot);
+  }
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+  // This overload returns true_type for the trait below.
+  // The conditional_t is to make the enabler type dependent.
+  template <class Alloc,
+            typename = std::enable_if_t<absl::is_trivially_relocatable<
+                std::conditional_t<false, Alloc, value_type>>::value>>
+  static std::true_type transfer_impl(Alloc*, slot_type* new_slot,
+                                      slot_type* old_slot, Rank1) {
+    // TODO(b/247130232): remove casts after fixing warnings.
+    // TODO(b/251814870): remove casts after fixing warnings.
+    std::memcpy(
+        static_cast<void*>(std::launder(
+            const_cast<std::remove_const_t<value_type>*>(&element(new_slot)))),
+        static_cast<const void*>(&element(old_slot)), sizeof(value_type));
+    return {};
+  }
+#endif
+
+  template <class Alloc>
+  static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+                            slot_type* old_slot, Rank2) {
+    construct(alloc, new_slot, std::move(element(old_slot)));
+    destroy(alloc, old_slot);
+  }
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
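
A rough sketch of the contract this new header formalizes: a policy supplies `slot_type`, `construct`, `destroy`, and `element`, while `transfer` stays optional. The `IntSlotPolicy` and `Demo` below are hypothetical examples, not part of Abseil:

#include <memory>
#include <utility>

#include "absl/container/internal/common_policy_traits.h"

namespace example {

// Hypothetical minimal policy: the slot directly stores an int.
struct IntSlotPolicy {
  using slot_type = int;

  template <class Alloc, class... Args>
  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
    std::allocator_traits<Alloc>::construct(*alloc, slot,
                                            std::forward<Args>(args)...);
  }

  template <class Alloc>
  static void destroy(Alloc* alloc, slot_type* slot) {
    std::allocator_traits<Alloc>::destroy(*alloc, slot);
  }

  static int& element(slot_type* slot) { return *slot; }
};

using Traits = absl::container_internal::common_policy_traits<IntSlotPolicy>;

inline void Demo() {
  std::allocator<int> alloc;
  alignas(int) unsigned char a_buf[sizeof(int)];
  alignas(int) unsigned char b_buf[sizeof(int)];
  int* a = reinterpret_cast<int*>(a_buf);
  int* b = reinterpret_cast<int*>(b_buf);
  Traits::construct(&alloc, a, 7);
  // IntSlotPolicy defines no transfer(), so the trait either relocates the
  // value with memcpy (int is trivially relocatable) or falls back to
  // construct-from-moved-element followed by destroy of the old slot.
  Traits::transfer(&alloc, b, a);
  Traits::destroy(&alloc, b);
}

}  // namespace example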
diff --git a/abseil-cpp/absl/container/internal/common_policy_traits_test.cc b/abseil-cpp/absl/container/internal/common_policy_traits_test.cc
new file mode 100644
index 0000000..5eaa4aa
--- /dev/null
+++ b/abseil-cpp/absl/container/internal/common_policy_traits_test.cc
@@ -0,0 +1,120 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/common_policy_traits.h"
+
+#include <functional>
+#include <memory>
+#include <new>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::MockFunction;
+using ::testing::AnyNumber;
+using ::testing::ReturnRef;
+
+using Slot = int;
+
+struct PolicyWithoutOptionalOps {
+  using slot_type = Slot;
+  using key_type = Slot;
+  using init_type = Slot;
+
+  static std::function<void(void*, Slot*, Slot)> construct;
+  static std::function<void(void*, Slot*)> destroy;
+
+  static std::function<Slot&(Slot*)> element;
+};
+
+std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
+std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
+
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
+
+struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
+  static std::function<void(void*, Slot*, Slot*)> transfer;
+};
+
+std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
+
+struct Test : ::testing::Test {
+  Test() {
+    PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
+      construct.Call(a1, a2, std::move(a3));
+    };
+    PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
+      destroy.Call(a1, a2);
+    };
+
+    PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
+      return element.Call(a1);
+    };
+
+    PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
+      return transfer.Call(a1, a2, a3);
+    };
+  }
+
+  std::allocator<Slot> alloc;
+  int a = 53;
+
+  MockFunction<void(void*, Slot*, Slot)> construct;
+  MockFunction<void(void*, Slot*)> destroy;
+
+  MockFunction<Slot&(Slot*)> element;
+
+  MockFunction<void(void*, Slot*, Slot*)> transfer;
+};
+
+TEST_F(Test, construct) {
+  EXPECT_CALL(construct, Call(&alloc, &a, 53));
+  common_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
+}
+
+TEST_F(Test, destroy) {
+  EXPECT_CALL(destroy, Call(&alloc, &a));
+  common_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
+}
+
+TEST_F(Test, element) {
+  int b = 0;
+  EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
+  EXPECT_EQ(&b, &common_policy_traits<PolicyWithoutOptionalOps>::element(&a));
+}
+
+TEST_F(Test, without_transfer) {
+  int b = 42;
+  EXPECT_CALL(element, Call(&a)).Times(AnyNumber()).WillOnce(ReturnRef(a));
+  EXPECT_CALL(element, Call(&b)).WillOnce(ReturnRef(b));
+  EXPECT_CALL(construct, Call(&alloc, &a, b)).Times(AnyNumber());
+  EXPECT_CALL(destroy, Call(&alloc, &b)).Times(AnyNumber());
+  common_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+TEST_F(Test, with_transfer) {
+  int b = 42;
+  EXPECT_CALL(transfer, Call(&alloc, &a, &b));
+  common_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/container/internal/compressed_tuple.h b/abseil-cpp/absl/container/internal/compressed_tuple.h
index 02bfd03..59e70eb 100644
--- a/abseil-cpp/absl/container/internal/compressed_tuple.h
+++ b/abseil-cpp/absl/container/internal/compressed_tuple.h
@@ -64,19 +64,6 @@
 template <typename D, size_t I>
 using ElemT = typename Elem<D, I>::type;
 
-// Use the __is_final intrinsic if available. Where it's not available, classes
-// declared with the 'final' specifier cannot be used as CompressedTuple
-// elements.
-// TODO(sbenza): Replace this with std::is_final in C++14.
-template <typename T>
-constexpr bool IsFinal() {
-#if defined(__clang__) || defined(__GNUC__)
-  return __is_final(T);
-#else
-  return false;
-#endif
-}
-
 // We can't use EBCO on other CompressedTuples because that would mean that we
 // derive from multiple Storage<> instantiations with the same I parameter,
 // and potentially from multiple identical Storage<> instantiations.  So anytime
@@ -86,20 +73,15 @@
 
 template <typename T>
 constexpr bool ShouldUseBase() {
-  return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+  return std::is_class<T>::value && std::is_empty<T>::value &&
+         !std::is_final<T>::value &&
          !std::is_base_of<uses_inheritance, T>::value;
 }
 
 // The storage class provides two specializations:
 //  - For empty classes, it stores T as a base class.
 //  - For everything else, it stores T as a member.
-template <typename T, size_t I,
-#if defined(_MSC_VER)
-          bool UseBase =
-              ShouldUseBase<typename std::enable_if<true, T>::type>()>
-#else
-          bool UseBase = ShouldUseBase<T>()>
-#endif
+template <typename T, size_t I, bool UseBase = ShouldUseBase<T>()>
 struct Storage {
   T value;
   constexpr Storage() = default;
@@ -257,7 +239,7 @@
 
   template <int I>
   ElemT<I>& get() & {
-    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+    return StorageT<I>::get();
   }
 
   template <int I>
diff --git a/abseil-cpp/absl/container/internal/compressed_tuple_test.cc b/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
index 62a7483..74111f9 100644
--- a/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
+++ b/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
@@ -403,6 +403,16 @@
 }
 #endif
 
+// TODO(b/214288561): enable this test.
+TEST(CompressedTupleTest, DISABLED_NestedEbo) {
+  struct Empty1 {};
+  struct Empty2 {};
+  CompressedTuple<Empty1, CompressedTuple<Empty2>, int> x;
+  CompressedTuple<Empty1, Empty2, int> y;
+  // Currently fails with sizeof(x) == 8, sizeof(y) == 4.
+  EXPECT_EQ(sizeof(x), sizeof(y));
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/internal/container_memory.h b/abseil-cpp/absl/container/internal/container_memory.h
index e67529e..f59ca4e 100644
--- a/abseil-cpp/absl/container/internal/container_memory.h
+++ b/abseil-cpp/absl/container/internal/container_memory.h
@@ -17,6 +17,7 @@
 
 #include <cassert>
 #include <cstddef>
+#include <cstring>
 #include <memory>
 #include <new>
 #include <tuple>
@@ -164,7 +165,7 @@
       std::forward<F>(f));
 }
 
-// Given arguments of an std::pair's consructor, PairArgs() returns a pair of
+// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
 // tuples with references to the passed arguments. The tuples contain
 // constructor arguments for the first and the second elements of the pair.
 //
@@ -174,7 +175,7 @@
 //
 // 2. auto a = PairArgs(args...);
 //    std::pair<F, S> p(std::piecewise_construct,
-//                      std::move(p.first), std::move(p.second));
+//                      std::move(a.first), std::move(a.second));
 inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
 template <class F, class S>
 std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
@@ -340,7 +341,8 @@
 struct map_slot_policy {
   using slot_type = map_slot_type<K, V>;
   using value_type = std::pair<const K, V>;
-  using mutable_value_type = std::pair<K, V>;
+  using mutable_value_type =
+      std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
 
  private:
   static void emplace(slot_type* slot) {
@@ -402,6 +404,15 @@
     }
   }
 
+  // Construct this slot by copying from another slot.
+  template <class Allocator>
+  static void construct(Allocator* alloc, slot_type* slot,
+                        const slot_type* other) {
+    emplace(slot);
+    absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+                                                 other->value);
+  }
+
   template <class Allocator>
   static void destroy(Allocator* alloc, slot_type* slot) {
     if (kMutableKeys::value) {
@@ -415,6 +426,16 @@
   static void transfer(Allocator* alloc, slot_type* new_slot,
                        slot_type* old_slot) {
     emplace(new_slot);
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+    if (absl::is_trivially_relocatable<value_type>()) {
+      // TODO(b/247130232,b/251814870): remove casts after fixing warnings.
+      std::memcpy(static_cast<void*>(std::launder(&new_slot->value)),
+                  static_cast<const void*>(&old_slot->value),
+                  sizeof(value_type));
+      return;
+    }
+#endif
+
     if (kMutableKeys::value) {
       absl::allocator_traits<Allocator>::construct(
           *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
@@ -424,33 +445,6 @@
     }
     destroy(alloc, old_slot);
   }
-
-  template <class Allocator>
-  static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
-    if (kMutableKeys::value) {
-      using std::swap;
-      swap(a->mutable_value, b->mutable_value);
-    } else {
-      value_type tmp = std::move(a->value);
-      absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
-      absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
-                                                   std::move(b->value));
-      absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
-      absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
-                                                   std::move(tmp));
-    }
-  }
-
-  template <class Allocator>
-  static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
-    if (kMutableKeys::value) {
-      dest->mutable_value = std::move(src->mutable_value);
-    } else {
-      absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
-      absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
-                                                   std::move(src->value));
-    }
-  }
 };
 
 }  // namespace container_internal
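
The new `transfer` fast path keys on `absl::is_trivially_relocatable`; a small sketch of that check (the `Point` type is illustrative):

#include "absl/meta/type_traits.h"

// Illustrative type: trivially copyable, hence trivially relocatable, so map
// slots holding it qualify for the memcpy-based transfer() path added above.
struct Point {
  int x;
  int y;
};

static_assert(absl::is_trivially_relocatable<Point>::value,
              "Point slots can be relocated with memcpy");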
diff --git a/abseil-cpp/absl/container/internal/container_memory_test.cc b/abseil-cpp/absl/container/internal/container_memory_test.cc
index 6a7fcd2..fb9c4dd 100644
--- a/abseil-cpp/absl/container/internal/container_memory_test.cc
+++ b/abseil-cpp/absl/container/internal/container_memory_test.cc
@@ -166,7 +166,7 @@
 }
 
 TEST(DecomposeValue, Decomposable) {
-  auto f = [](const int& x, int&& y) {
+  auto f = [](const int& x, int&& y) {  // NOLINT
     EXPECT_EQ(&x, &y);
     EXPECT_EQ(42, x);
     return 'A';
@@ -200,7 +200,8 @@
 }
 
 TEST(DecomposePair, Decomposable) {
-  auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+  auto f = [](const int& x,  // NOLINT
+              std::piecewise_construct_t, std::tuple<int&&> k,
               std::tuple<double>&& v) {
     EXPECT_EQ(&x, &std::get<0>(k));
     EXPECT_EQ(42, x);
diff --git a/abseil-cpp/absl/container/internal/counting_allocator.h b/abseil-cpp/absl/container/internal/counting_allocator.h
index 927cf08..66068a5 100644
--- a/abseil-cpp/absl/container/internal/counting_allocator.h
+++ b/abseil-cpp/absl/container/internal/counting_allocator.h
@@ -80,7 +80,15 @@
   template <typename U>
   void destroy(U* p) {
     Allocator allocator;
+    // Suppress a spurious -Wuse-after-free warning (GCC 12 compiler bug).
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuse-after-free"
+#endif
     AllocatorTraits::destroy(allocator, p);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
     if (instance_count_ != nullptr) {
       *instance_count_ -= 1;
     }
diff --git a/abseil-cpp/absl/container/internal/hash_function_defaults.h b/abseil-cpp/absl/container/internal/hash_function_defaults.h
index 0683422..a3613b4 100644
--- a/abseil-cpp/absl/container/internal/hash_function_defaults.h
+++ b/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -56,6 +56,10 @@
 #include "absl/strings/cord.h"
 #include "absl/strings/string_view.h"
 
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
@@ -78,24 +82,26 @@
   }
 };
 
+struct StringEq {
+  using is_transparent = void;
+  bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+    return lhs == rhs;
+  }
+  bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+    return lhs == rhs;
+  }
+  bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+    return lhs == rhs;
+  }
+  bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+    return lhs == rhs;
+  }
+};
+
 // Supports heterogeneous lookup for string-like elements.
 struct StringHashEq {
   using Hash = StringHash;
-  struct Eq {
-    using is_transparent = void;
-    bool operator()(absl::string_view lhs, absl::string_view rhs) const {
-      return lhs == rhs;
-    }
-    bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
-      return lhs == rhs;
-    }
-    bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
-      return lhs == rhs;
-    }
-    bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
-      return lhs == rhs;
-    }
-  };
+  using Eq = StringEq;
 };
 
 template <>
@@ -105,6 +111,48 @@
 template <>
 struct HashEq<absl::Cord> : StringHashEq {};
 
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+
+template <typename TChar>
+struct BasicStringHash {
+  using is_transparent = void;
+
+  size_t operator()(std::basic_string_view<TChar> v) const {
+    return absl::Hash<std::basic_string_view<TChar>>{}(v);
+  }
+};
+
+template <typename TChar>
+struct BasicStringEq {
+  using is_transparent = void;
+  bool operator()(std::basic_string_view<TChar> lhs,
+                  std::basic_string_view<TChar> rhs) const {
+    return lhs == rhs;
+  }
+};
+
+// Supports heterogeneous lookup for w/u16/u32 string + string_view + char*.
+template <typename TChar>
+struct BasicStringHashEq {
+  using Hash = BasicStringHash<TChar>;
+  using Eq = BasicStringEq<TChar>;
+};
+
+template <>
+struct HashEq<std::wstring> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::wstring_view> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::u16string> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u16string_view> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u32string> : BasicStringHashEq<char32_t> {};
+template <>
+struct HashEq<std::u32string_view> : BasicStringHashEq<char32_t> {};
+
+#endif  // ABSL_HAVE_STD_STRING_VIEW
+
 // Supports heterogeneous lookup for pointers and smart pointers.
 template <class T>
 struct HashEq<T*> {
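
Assuming `ABSL_HAVE_STD_STRING_VIEW` is defined, the specializations added above give the hash-table defaults transparent lookup for wide and UTF-16/32 string keys; a usage sketch (the function and its contents are illustrative):

#include <string>
#include <string_view>

#include "absl/container/flat_hash_map.h"

// Heterogeneous lookup: a std::wstring-keyed table can be probed with a
// std::wstring_view, so no temporary std::wstring is constructed.
bool HasGreeting(const absl::flat_hash_map<std::wstring, int>& m) {
  return m.contains(std::wstring_view(L"hello"));
}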
diff --git a/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc b/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
index 59576b8..c31af3b 100644
--- a/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
+++ b/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
@@ -24,6 +24,10 @@
 #include "absl/strings/cord_test_helpers.h"
 #include "absl/strings/string_view.h"
 
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
@@ -109,6 +113,168 @@
   EXPECT_NE(h, hash(std::string("b")));
 }
 
+TEST(BasicStringViewTest, WStringEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_eq<std::wstring> eq;
+  EXPECT_TRUE(eq(L"a", L"a"));
+  EXPECT_TRUE(eq(L"a", std::wstring_view(L"a")));
+  EXPECT_TRUE(eq(L"a", std::wstring(L"a")));
+  EXPECT_FALSE(eq(L"a", L"b"));
+  EXPECT_FALSE(eq(L"a", std::wstring_view(L"b")));
+  EXPECT_FALSE(eq(L"a", std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, WStringViewEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_eq<std::wstring_view> eq;
+  EXPECT_TRUE(eq(L"a", L"a"));
+  EXPECT_TRUE(eq(L"a", std::wstring_view(L"a")));
+  EXPECT_TRUE(eq(L"a", std::wstring(L"a")));
+  EXPECT_FALSE(eq(L"a", L"b"));
+  EXPECT_FALSE(eq(L"a", std::wstring_view(L"b")));
+  EXPECT_FALSE(eq(L"a", std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_eq<std::u16string> eq;
+  EXPECT_TRUE(eq(u"a", u"a"));
+  EXPECT_TRUE(eq(u"a", std::u16string_view(u"a")));
+  EXPECT_TRUE(eq(u"a", std::u16string(u"a")));
+  EXPECT_FALSE(eq(u"a", u"b"));
+  EXPECT_FALSE(eq(u"a", std::u16string_view(u"b")));
+  EXPECT_FALSE(eq(u"a", std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringViewEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_eq<std::u16string_view> eq;
+  EXPECT_TRUE(eq(u"a", u"a"));
+  EXPECT_TRUE(eq(u"a", std::u16string_view(u"a")));
+  EXPECT_TRUE(eq(u"a", std::u16string(u"a")));
+  EXPECT_FALSE(eq(u"a", u"b"));
+  EXPECT_FALSE(eq(u"a", std::u16string_view(u"b")));
+  EXPECT_FALSE(eq(u"a", std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_eq<std::u32string> eq;
+  EXPECT_TRUE(eq(U"a", U"a"));
+  EXPECT_TRUE(eq(U"a", std::u32string_view(U"a")));
+  EXPECT_TRUE(eq(U"a", std::u32string(U"a")));
+  EXPECT_FALSE(eq(U"a", U"b"));
+  EXPECT_FALSE(eq(U"a", std::u32string_view(U"b")));
+  EXPECT_FALSE(eq(U"a", std::u32string(U"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringViewEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_eq<std::u32string_view> eq;
+  EXPECT_TRUE(eq(U"a", U"a"));
+  EXPECT_TRUE(eq(U"a", std::u32string_view(U"a")));
+  EXPECT_TRUE(eq(U"a", std::u32string(U"a")));
+  EXPECT_FALSE(eq(U"a", U"b"));
+  EXPECT_FALSE(eq(U"a", std::u32string_view(U"b")));
+  EXPECT_FALSE(eq(U"a", std::u32string(U"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, WStringHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_hash<std::wstring> hash;
+  auto h = hash(L"a");
+  EXPECT_EQ(h, hash(std::wstring_view(L"a")));
+  EXPECT_EQ(h, hash(std::wstring(L"a")));
+  EXPECT_NE(h, hash(std::wstring_view(L"b")));
+  EXPECT_NE(h, hash(std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, WStringViewHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_hash<std::wstring_view> hash;
+  auto h = hash(L"a");
+  EXPECT_EQ(h, hash(std::wstring_view(L"a")));
+  EXPECT_EQ(h, hash(std::wstring(L"a")));
+  EXPECT_NE(h, hash(std::wstring_view(L"b")));
+  EXPECT_NE(h, hash(std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_hash<std::u16string> hash;
+  auto h = hash(u"a");
+  EXPECT_EQ(h, hash(std::u16string_view(u"a")));
+  EXPECT_EQ(h, hash(std::u16string(u"a")));
+  EXPECT_NE(h, hash(std::u16string_view(u"b")));
+  EXPECT_NE(h, hash(std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringViewHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_hash<std::u16string_view> hash;
+  auto h = hash(u"a");
+  EXPECT_EQ(h, hash(std::u16string_view(u"a")));
+  EXPECT_EQ(h, hash(std::u16string(u"a")));
+  EXPECT_NE(h, hash(std::u16string_view(u"b")));
+  EXPECT_NE(h, hash(std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_hash<std::u32string> hash;
+  auto h = hash(U"a");
+  EXPECT_EQ(h, hash(std::u32string_view(U"a")));
+  EXPECT_EQ(h, hash(std::u32string(U"a")));
+  EXPECT_NE(h, hash(std::u32string_view(U"b")));
+  EXPECT_NE(h, hash(std::u32string(U"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringViewHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  hash_default_hash<std::u32string_view> hash;
+  auto h = hash(U"a");
+  EXPECT_EQ(h, hash(std::u32string_view(U"a")));
+  EXPECT_EQ(h, hash(std::u32string(U"a")));
+  EXPECT_NE(h, hash(std::u32string_view(U"b")));
+  EXPECT_NE(h, hash(std::u32string(U"b")));
+#endif
+}
+
 struct NoDeleter {
   template <class T>
   void operator()(const T* ptr) const {}
@@ -310,7 +476,7 @@
   hash_default_hash<typename T::first_type> hash;
 };
 
-TYPED_TEST_CASE_P(StringLikeTest);
+TYPED_TEST_SUITE_P(StringLikeTest);
 
 TYPED_TEST_P(StringLikeTest, Eq) {
   EXPECT_TRUE(this->eq(this->a1, this->b1));
diff --git a/abseil-cpp/absl/container/internal/hash_generator_testing.h b/abseil-cpp/absl/container/internal/hash_generator_testing.h
index 6869fe4..f1f555a 100644
--- a/abseil-cpp/absl/container/internal/hash_generator_testing.h
+++ b/abseil-cpp/absl/container/internal/hash_generator_testing.h
@@ -21,11 +21,13 @@
 #include <stdint.h>
 
 #include <algorithm>
+#include <cassert>
 #include <iosfwd>
 #include <random>
 #include <tuple>
 #include <type_traits>
 #include <utility>
+#include <vector>
 
 #include "absl/container/internal/hash_policy_testing.h"
 #include "absl/memory/memory.h"
@@ -153,6 +155,25 @@
                                   typename Container::value_type,
                                   typename Container::key_type>::type>&>()());
 
+// Naive wrapper that performs a linear search of previous values.
+// Beware this is O(n^2) in the number of generated values, which is
+// acceptable for small kMaxValues.
+template <class T, size_t kMaxValues = 64, class E = void>
+struct UniqueGenerator {
+  Generator<T, E> gen;
+  std::vector<T> values;
+
+  T operator()() {
+    assert(values.size() < kMaxValues);
+    for (;;) {
+      T value = gen();
+      if (std::find(values.begin(), values.end(), value) == values.end()) {
+        values.push_back(value);
+        return value;
+      }
+    }
+  }
+};
+
 }  // namespace hash_internal
 }  // namespace container_internal
 ABSL_NAMESPACE_END
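
A minimal sketch of how this test helper is meant to be used (the `FillDistinct` wrapper is illustrative):

#include "absl/container/internal/hash_generator_testing.h"

using absl::container_internal::hash_internal::UniqueGenerator;

// Each call yields a value distinct from every earlier value produced by this
// generator instance (it asserts if more than kMaxValues are requested).
void FillDistinct(int (&out)[3]) {
  UniqueGenerator<int> gen;
  for (int& v : out) v = gen();
}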
diff --git a/abseil-cpp/absl/container/internal/hash_policy_traits.h b/abseil-cpp/absl/container/internal/hash_policy_traits.h
index 46c97b1..164ec12 100644
--- a/abseil-cpp/absl/container/internal/hash_policy_traits.h
+++ b/abseil-cpp/absl/container/internal/hash_policy_traits.h
@@ -21,6 +21,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/container/internal/common_policy_traits.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -29,7 +30,7 @@
 
 // Defines how slots are initialized/destroyed/moved.
 template <class Policy, class = void>
-struct hash_policy_traits {
+struct hash_policy_traits : common_policy_traits<Policy> {
   // The type of the keys stored in the hashtable.
   using key_type = typename Policy::key_type;
 
@@ -87,43 +88,6 @@
   // Defaults to false if not provided by the policy.
   using constant_iterators = ConstantIteratorsImpl<>;
 
-  // PRECONDITION: `slot` is UNINITIALIZED
-  // POSTCONDITION: `slot` is INITIALIZED
-  template <class Alloc, class... Args>
-  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
-    Policy::construct(alloc, slot, std::forward<Args>(args)...);
-  }
-
-  // PRECONDITION: `slot` is INITIALIZED
-  // POSTCONDITION: `slot` is UNINITIALIZED
-  template <class Alloc>
-  static void destroy(Alloc* alloc, slot_type* slot) {
-    Policy::destroy(alloc, slot);
-  }
-
-  // Transfers the `old_slot` to `new_slot`. Any memory allocated by the
-  // allocator inside `old_slot` to `new_slot` can be transferred.
-  //
-  // OPTIONAL: defaults to:
-  //
-  //     clone(new_slot, std::move(*old_slot));
-  //     destroy(old_slot);
-  //
-  // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
-  // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
-  //                UNINITIALIZED
-  template <class Alloc>
-  static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
-    transfer_impl(alloc, new_slot, old_slot, 0);
-  }
-
-  // PRECONDITION: `slot` is INITIALIZED
-  // POSTCONDITION: `slot` is INITIALIZED
-  template <class P = Policy>
-  static auto element(slot_type* slot) -> decltype(P::element(slot)) {
-    return P::element(slot);
-  }
-
   // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
   //
   // If `slot` is nullptr, returns the constant amount of memory owned by any
@@ -174,8 +138,8 @@
   // Used for node handle manipulation.
   template <class P = Policy>
   static auto mutable_key(slot_type* slot)
-      -> decltype(P::apply(ReturnKey(), element(slot))) {
-    return P::apply(ReturnKey(), element(slot));
+      -> decltype(P::apply(ReturnKey(), hash_policy_traits::element(slot))) {
+    return P::apply(ReturnKey(), hash_policy_traits::element(slot));
   }
 
   // Returns the "value" (as opposed to the "key") portion of the element. Used
@@ -184,21 +148,6 @@
   static auto value(T* elem) -> decltype(P::value(elem)) {
     return P::value(elem);
   }
-
- private:
-  // Use auto -> decltype as an enabler.
-  template <class Alloc, class P = Policy>
-  static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
-                            slot_type* old_slot, int)
-      -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
-    P::transfer(alloc, new_slot, old_slot);
-  }
-  template <class Alloc>
-  static void transfer_impl(Alloc* alloc, slot_type* new_slot,
-                            slot_type* old_slot, char) {
-    construct(alloc, new_slot, std::move(element(old_slot)));
-    destroy(alloc, old_slot);
-  }
 };
 
 }  // namespace container_internal
diff --git a/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc b/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc
index 6ef8b9e..82d7cc3 100644
--- a/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc
+++ b/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc
@@ -38,81 +38,31 @@
   using key_type = Slot;
   using init_type = Slot;
 
-  static std::function<void(void*, Slot*, Slot)> construct;
-  static std::function<void(void*, Slot*)> destroy;
-
   static std::function<Slot&(Slot*)> element;
   static int apply(int v) { return apply_impl(v); }
   static std::function<int(int)> apply_impl;
   static std::function<Slot&(Slot*)> value;
 };
 
-std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
-std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
-
-std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
 std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
 std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value;
 
-struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
-  static std::function<void(void*, Slot*, Slot*)> transfer;
-};
-
-std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
-
 struct Test : ::testing::Test {
   Test() {
-    PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
-      construct.Call(a1, a2, std::move(a3));
-    };
-    PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
-      destroy.Call(a1, a2);
-    };
-
-    PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
-      return element.Call(a1);
-    };
     PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int {
       return apply.Call(a1);
     };
     PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& {
       return value.Call(a1);
     };
-
-    PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
-      return transfer.Call(a1, a2, a3);
-    };
   }
 
   std::allocator<int> alloc;
   int a = 53;
-
-  MockFunction<void(void*, Slot*, Slot)> construct;
-  MockFunction<void(void*, Slot*)> destroy;
-
-  MockFunction<Slot&(Slot*)> element;
   MockFunction<int(int)> apply;
   MockFunction<Slot&(Slot*)> value;
-
-  MockFunction<void(void*, Slot*, Slot*)> transfer;
 };
 
-TEST_F(Test, construct) {
-  EXPECT_CALL(construct, Call(&alloc, &a, 53));
-  hash_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
-}
-
-TEST_F(Test, destroy) {
-  EXPECT_CALL(destroy, Call(&alloc, &a));
-  hash_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
-}
-
-TEST_F(Test, element) {
-  int b = 0;
-  EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
-  EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::element(&a));
-}
-
 TEST_F(Test, apply) {
   EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337));
   EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42)));
@@ -124,20 +74,6 @@
   EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
 }
 
-TEST_F(Test, without_transfer) {
-  int b = 42;
-  EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b));
-  EXPECT_CALL(construct, Call(&alloc, &a, b));
-  EXPECT_CALL(destroy, Call(&alloc, &b));
-  hash_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
-}
-
-TEST_F(Test, with_transfer) {
-  int b = 42;
-  EXPECT_CALL(transfer, Call(&alloc, &a, &b));
-  hash_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
-}
-
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index e4484fb..79a0973 100644
--- a/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -14,6 +14,7 @@
 
 #include "absl/container/internal/hashtablez_sampler.h"
 
+#include <algorithm>
 #include <atomic>
 #include <cassert>
 #include <cmath>
@@ -21,49 +22,57 @@
 #include <limits>
 
 #include "absl/base/attributes.h"
-#include "absl/base/internal/exponential_biased.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/memory/memory.h"
+#include "absl/profiling/internal/exponential_biased.h"
+#include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
+#include "absl/time/clock.h"
+#include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr int HashtablezInfo::kMaxStackDepth;
+#endif
 
 namespace {
 ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
     false
 };
 ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
-ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
+ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
     g_exponential_biased_generator;
 #endif
 
+void TriggerHashtablezConfigListener() {
+  auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+  if (listener != nullptr) listener();
+}
+
 }  // namespace
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
-HashtablezSampler& HashtablezSampler::Global() {
+HashtablezSampler& GlobalHashtablezSampler() {
   static auto* sampler = new HashtablezSampler();
   return *sampler;
 }
 
-HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
-    DisposeCallback f) {
-  return dispose_.exchange(f, std::memory_order_relaxed);
-}
-
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+                                        size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -72,100 +81,17 @@
   total_probe_length.store(0, std::memory_order_relaxed);
   hashes_bitwise_or.store(0, std::memory_order_relaxed);
   hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+  hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+  max_reserve.store(0, std::memory_order_relaxed);
 
   create_time = absl::Now();
+  weight = stride;
   // The inliner makes hardcoded skip_count difficult (especially when combined
   // with LTO).  We use the ability to exclude stacks by regex when encoding
   // instead.
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
-  dead = nullptr;
-}
-
-HashtablezSampler::HashtablezSampler()
-    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
-  absl::MutexLock l(&graveyard_.init_mu);
-  graveyard_.dead = &graveyard_;
-}
-
-HashtablezSampler::~HashtablezSampler() {
-  HashtablezInfo* s = all_.load(std::memory_order_acquire);
-  while (s != nullptr) {
-    HashtablezInfo* next = s->next;
-    delete s;
-    s = next;
-  }
-}
-
-void HashtablezSampler::PushNew(HashtablezInfo* sample) {
-  sample->next = all_.load(std::memory_order_relaxed);
-  while (!all_.compare_exchange_weak(sample->next, sample,
-                                     std::memory_order_release,
-                                     std::memory_order_relaxed)) {
-  }
-}
-
-void HashtablezSampler::PushDead(HashtablezInfo* sample) {
-  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
-    dispose(*sample);
-  }
-
-  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-  absl::MutexLock sample_lock(&sample->init_mu);
-  sample->dead = graveyard_.dead;
-  graveyard_.dead = sample;
-}
-
-HashtablezInfo* HashtablezSampler::PopDead() {
-  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-
-  // The list is circular, so eventually it collapses down to
-  //   graveyard_.dead == &graveyard_
-  // when it is empty.
-  HashtablezInfo* sample = graveyard_.dead;
-  if (sample == &graveyard_) return nullptr;
-
-  absl::MutexLock sample_lock(&sample->init_mu);
-  graveyard_.dead = sample->dead;
-  sample->PrepareForSampling();
-  return sample;
-}
-
-HashtablezInfo* HashtablezSampler::Register() {
-  int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
-  if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
-    size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-    dropped_samples_.fetch_add(1, std::memory_order_relaxed);
-    return nullptr;
-  }
-
-  HashtablezInfo* sample = PopDead();
-  if (sample == nullptr) {
-    // Resurrection failed.  Hire a new warlock.
-    sample = new HashtablezInfo();
-    PushNew(sample);
-  }
-
-  return sample;
-}
-
-void HashtablezSampler::Unregister(HashtablezInfo* sample) {
-  PushDead(sample);
-  size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int64_t HashtablezSampler::Iterate(
-    const std::function<void(const HashtablezInfo& stack)>& f) {
-  HashtablezInfo* s = all_.load(std::memory_order_acquire);
-  while (s != nullptr) {
-    absl::MutexLock l(&s->init_mu);
-    if (s->dead == nullptr) {
-      f(*s);
-    }
-    s = s->next;
-  }
-
-  return dropped_samples_.load(std::memory_order_relaxed);
+  inline_element_size = inline_element_size_value;
 }
 
 static bool ShouldForceSampling() {
@@ -180,27 +106,40 @@
   if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
 
   if (state == kUninitialized) {
-    state = AbslContainerInternalSampleEverything() ? kForce : kDontForce;
+    state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+                ? kForce
+                : kDontForce;
     global_state.store(state, std::memory_order_relaxed);
   }
   return state == kForce;
 }
 
-HashtablezInfo* SampleSlow(int64_t* next_sample) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
-    *next_sample = 1;
-    return HashtablezSampler::Global().Register();
+    next_sample.next_sample = 1;
+    const int64_t old_stride = exchange(next_sample.sample_stride, 1);
+    HashtablezInfo* result =
+        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+    return result;
   }
 
 #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-  *next_sample = std::numeric_limits<int64_t>::max();
+  next_sample = {
+      std::numeric_limits<int64_t>::max(),
+      std::numeric_limits<int64_t>::max(),
+  };
   return nullptr;
 #else
-  bool first = *next_sample < 0;
-  *next_sample = g_exponential_biased_generator.GetStride(
+  bool first = next_sample.next_sample < 0;
+
+  const int64_t next_stride = g_exponential_biased_generator.GetStride(
       g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+  next_sample.next_sample = next_stride;
+  const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
   // Small values of interval are equivalent to just sampling next time.
-  ABSL_ASSERT(*next_sample >= 1);
+  ABSL_ASSERT(next_stride >= 1);
 
   // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
   // low enough that we will start sampling in a reasonable time, so we just use
@@ -210,16 +149,53 @@
   // We will only be negative on our first count, so we should just retry in
   // that case.
   if (first) {
-    if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
-    return SampleSlow(next_sample);
+    if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
+    return SampleSlow(next_sample, inline_element_size);
   }
 
-  return HashtablezSampler::Global().Register();
+  return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
 #endif
 }
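
For orientation, the fast path that calls `SampleSlow` is an inline per-thread countdown defined in the header; a simplified sketch under that assumption (`MaybeSampleSketch` is a hypothetical name, not the real inline function):

// Simplified sketch of the per-thread fast path: a thread-local countdown is
// decremented on each candidate event, and only when it reaches zero is a
// HashtablezInfo registered, with the elapsed stride recorded as the sample's
// weight.
inline HashtablezInfo* MaybeSampleSketch(size_t inline_element_size) {
  if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) return nullptr;
  return SampleSlow(global_next_sample, inline_element_size);
}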
 
 void UnsampleSlow(HashtablezInfo* info) {
-  HashtablezSampler::Global().Unregister(info);
+  GlobalHashtablezSampler().Unregister(info);
+}
+
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+  total_probe_length /= 16;
+#else
+  total_probe_length /= 8;
+#endif
+  info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+  info->num_erases.store(0, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_rehashes.store(
+      1 + info->num_rehashes.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
+}
+
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) {
+  info->max_reserve.store(
+      (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+                 target_capacity),
+      std::memory_order_relaxed);
+}
+
+void RecordClearedReservationSlow(HashtablezInfo* info) {
+  info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+                              size_t capacity) {
+  info->size.store(size, std::memory_order_relaxed);
+  info->capacity.store(capacity, std::memory_order_relaxed);
+  if (size == 0) {
+    // This is a clear, reset the total/num_erases too.
+    info->total_probe_length.store(0, std::memory_order_relaxed);
+    info->num_erases.store(0, std::memory_order_relaxed);
+  }
 }
 
 void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -227,7 +203,7 @@
   // SwissTables probe in groups of 16, so scale this to count item probes
   // rather than the offset from the desired slot.
   size_t probe_length = distance_from_desired;
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
   probe_length /= 16;
 #else
   probe_length /= 8;
@@ -235,6 +211,7 @@
 
   info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
   info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+  info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
   info->max_probe_length.store(
       std::max(info->max_probe_length.load(std::memory_order_relaxed),
                probe_length),
@@ -243,11 +220,41 @@
   info->size.fetch_add(1, std::memory_order_relaxed);
 }
 
+void RecordEraseSlow(HashtablezInfo* info) {
+  info->size.fetch_sub(1, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed),
+                         std::memory_order_relaxed);
+}
+
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+  g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+  return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
 void SetHashtablezEnabled(bool enabled) {
+  SetHashtablezEnabledInternal(enabled);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
   g_hashtablez_enabled.store(enabled, std::memory_order_release);
 }
 
+int32_t GetHashtablezSampleParameter() {
+  return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
 void SetHashtablezSampleParameter(int32_t rate) {
+  SetHashtablezSampleParameterInternal(rate);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
   if (rate > 0) {
     g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
   } else {
@@ -256,12 +263,20 @@
   }
 }
 
-void SetHashtablezMaxSamples(int32_t max) {
+size_t GetHashtablezMaxSamples() {
+  return GlobalHashtablezSampler().GetMaxSamples();
+}
+
+void SetHashtablezMaxSamples(size_t max) {
+  SetHashtablezMaxSamplesInternal(max);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(size_t max) {
   if (max > 0) {
-    g_hashtablez_max_samples.store(max, std::memory_order_release);
+    GlobalHashtablezSampler().SetMaxSamples(max);
   } else {
-    ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
-                 static_cast<long long>(max));  // NOLINT(runtime/int)
+    ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0");
   }
 }
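
The rewritten registration path above replaces the single per-thread countdown with a SamplingState that also remembers the stride each countdown was drawn with, so every sample registered with GlobalHashtablezSampler() carries a weight equal to the number of tables it stands in for. A minimal standalone sketch of that decision logic follows; the geometric distribution and the mean parameter are illustrative stand-ins for Abseil's internal exponential-biased generator, not its API:

    #include <cstdint>
    #include <random>

    // Mirrors the SamplingState struct added in hashtablez_sampler.h.
    struct SamplingState {
      int64_t next_sample;    // countdown until the next table is sampled
      int64_t sample_stride;  // stride the countdown was drawn with (the weight)
    };

    // Draws a new stride of roughly the requested mean; always >= 1.
    // Assumes mean > 1 so the success probability stays in (0, 1).
    int64_t DrawStride(std::mt19937_64& rng, double mean) {
      std::geometric_distribution<int64_t> dist(1.0 / mean);
      return dist(rng) + 1;
    }

    // Returns true when the current table creation should be sampled; the
    // caller would then register the table with weight state.sample_stride.
    bool ShouldSample(SamplingState& state, std::mt19937_64& rng, double mean) {
      if (--state.next_sample > 0) return false;
      state.sample_stride = DrawStride(rng, mean);
      state.next_sample = state.sample_stride;
      return true;
    }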
 
diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 394348d..d8fd8f3 100644
--- a/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -44,9 +44,10 @@
 #include <memory>
 #include <vector>
 
+#include "absl/base/config.h"
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/utility/utility.h"
 
@@ -57,7 +58,7 @@
 // Stores information about a sampled hashtable.  All mutations to this *must*
 // be made through `Record*` functions below.  All reads from this *must* only
 // occur in the callback to `HashtablezSampler::Iterate`.
-struct HashtablezInfo {
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   // Constructs the object but does not fill in any fields.
   HashtablezInfo();
   ~HashtablezInfo();
@@ -66,7 +67,8 @@
 
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -78,65 +80,44 @@
   std::atomic<size_t> total_probe_length;
   std::atomic<size_t> hashes_bitwise_or;
   std::atomic<size_t> hashes_bitwise_and;
-
-  // `HashtablezSampler` maintains intrusive linked lists for all samples.  See
-  // comments on `HashtablezSampler::all_` for details on these.  `init_mu`
-  // guards the ability to restore the sample to a pristine state.  This
-  // prevents races with sampling and resurrecting an object.
-  absl::Mutex init_mu;
-  HashtablezInfo* next;
-  HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
+  std::atomic<size_t> hashes_bitwise_xor;
+  std::atomic<size_t> max_reserve;
 
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions.  They are logically `const` in that sense.
-  // These are guarded by init_mu, but that is not externalized to clients, who
-  // can only read them during `HashtablezSampler::Iterate` which will hold the
-  // lock.
+  // These are guarded by init_mu, but that is not externalized to clients,
+  // which can read them only during `SampleRecorder::Iterate` which will hold
+  // the lock.
   static constexpr int kMaxStackDepth = 64;
   absl::Time create_time;
   int32_t depth;
   void* stack[kMaxStackDepth];
+  size_t inline_element_size;  // How big is the slot?
 };
 
-inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-  total_probe_length /= 16;
-#else
-  total_probe_length /= 8;
-#endif
-  info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
-  info->num_erases.store(0, std::memory_order_relaxed);
-  // There is only one concurrent writer, so `load` then `store` is sufficient
-  // instead of using `fetch_add`.
-  info->num_rehashes.store(
-      1 + info->num_rehashes.load(std::memory_order_relaxed),
-      std::memory_order_relaxed);
-}
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length);
 
-inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
-                                     size_t capacity) {
-  info->size.store(size, std::memory_order_relaxed);
-  info->capacity.store(capacity, std::memory_order_relaxed);
-  if (size == 0) {
-    // This is a clear, reset the total/num_erases too.
-    info->total_probe_length.store(0, std::memory_order_relaxed);
-    info->num_erases.store(0, std::memory_order_relaxed);
-  }
-}
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity);
+
+void RecordClearedReservationSlow(HashtablezInfo* info);
+
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+                              size_t capacity);
 
 void RecordInsertSlow(HashtablezInfo* info, size_t hash,
                       size_t distance_from_desired);
 
-inline void RecordEraseSlow(HashtablezInfo* info) {
-  info->size.fetch_sub(1, std::memory_order_relaxed);
-  // There is only one concurrent writer, so `load` then `store` is sufficient
-  // instead of using `fetch_add`.
-  info->num_erases.store(
-      1 + info->num_erases.load(std::memory_order_relaxed),
-      std::memory_order_relaxed);
-}
+void RecordEraseSlow(HashtablezInfo* info);
 
-HashtablezInfo* SampleSlow(int64_t* next_sample);
+struct SamplingState {
+  int64_t next_sample;
+  // When we make a sampling decision, we record that distance so we can weight
+  // each sample.
+  int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size);
 void UnsampleSlow(HashtablezInfo* info);
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -148,7 +129,10 @@
  public:
   explicit HashtablezInfoHandle() : info_(nullptr) {}
   explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
-  ~HashtablezInfoHandle() {
+
+  // We do not have a destructor. Caller is responsible for calling Unregister
+  // before destroying the handle.
+  void Unregister() {
     if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
     UnsampleSlow(info_);
   }
@@ -176,6 +160,16 @@
     RecordRehashSlow(info_, total_probe_length);
   }
 
+  inline void RecordReservation(size_t target_capacity) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordReservationSlow(info_, target_capacity);
+  }
+
+  inline void RecordClearedReservation() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordClearedReservationSlow(info_);
+  }
+
   inline void RecordInsert(size_t hash, size_t distance_from_desired) {
     if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
     RecordInsertSlow(info_, hash, distance_from_desired);
@@ -203,8 +197,11 @@
   explicit HashtablezInfoHandle() = default;
   explicit HashtablezInfoHandle(std::nullptr_t) {}
 
+  inline void Unregister() {}
   inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
   inline void RecordRehash(size_t /*total_probe_length*/) {}
+  inline void RecordReservation(size_t /*target_capacity*/) {}
+  inline void RecordClearedReservation() {}
   inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
   inline void RecordErase() {}
 
@@ -214,105 +211,54 @@
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 // Returns a sampling handle that manages registration with the global
 // sampler; the caller must call Unregister() before destroying the handle.
-inline HashtablezInfoHandle Sample() {
+inline HashtablezInfoHandle Sample(
+    size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-  if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+  if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
     return HashtablezInfoHandle(nullptr);
   }
-  return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+  return HashtablezInfoHandle(
+      SampleSlow(global_next_sample, inline_element_size));
 #else
   return HashtablezInfoHandle(nullptr);
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 }
 
-// Holds samples and their associated stack traces with a soft limit of
-// `SetHashtablezMaxSamples()`.
-//
-// Thread safe.
-class HashtablezSampler {
- public:
-  // Returns a global Sampler.
-  static HashtablezSampler& Global();
+using HashtablezSampler =
+    ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
 
-  HashtablezSampler();
-  ~HashtablezSampler();
+// Returns a global Sampler.
+HashtablezSampler& GlobalHashtablezSampler();
 
-  // Registers for sampling.  Returns an opaque registration info.
-  HashtablezInfo* Register();
-
-  // Unregisters the sample.
-  void Unregister(HashtablezInfo* sample);
-
-  // The dispose callback will be called on all samples the moment they are
-  // being unregistered. Only affects samples that are unregistered after the
-  // callback has been set.
-  // Returns the previous callback.
-  using DisposeCallback = void (*)(const HashtablezInfo&);
-  DisposeCallback SetDisposeCallback(DisposeCallback f);
-
-  // Iterates over all the registered `StackInfo`s.  Returning the number of
-  // samples that have been dropped.
-  int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
-
- private:
-  void PushNew(HashtablezInfo* sample);
-  void PushDead(HashtablezInfo* sample);
-  HashtablezInfo* PopDead();
-
-  std::atomic<size_t> dropped_samples_;
-  std::atomic<size_t> size_estimate_;
-
-  // Intrusive lock free linked lists for tracking samples.
-  //
-  // `all_` records all samples (they are never removed from this list) and is
-  // terminated with a `nullptr`.
-  //
-  // `graveyard_.dead` is a circular linked list.  When it is empty,
-  // `graveyard_.dead == &graveyard`.  The list is circular so that
-  // every item on it (even the last) has a non-null dead pointer.  This allows
-  // `Iterate` to determine if a given sample is live or dead using only
-  // information on the sample itself.
-  //
-  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
-  // looks like this (G is the Graveyard):
-  //
-  //           +---+    +---+    +---+    +---+    +---+
-  //    all -->| A |--->| B |--->| C |--->| D |--->| E |
-  //           |   |    |   |    |   |    |   |    |   |
-  //   +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
-  //   | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
-  //   |   |         |        |        |        |
-  //   |   | --------+        +--------+        |
-  //   +---+                                    |
-  //     ^                                      |
-  //     +--------------------------------------+
-  //
-  std::atomic<HashtablezInfo*> all_;
-  HashtablezInfo graveyard_;
-
-  std::atomic<DisposeCallback> dispose_;
-};
+using HashtablezConfigListener = void (*)();
+void SetHashtablezConfigListener(HashtablezConfigListener l);
 
 // Enables or disables sampling for Swiss tables.
+bool IsHashtablezEnabled();
 void SetHashtablezEnabled(bool enabled);
+void SetHashtablezEnabledInternal(bool enabled);
 
 // Sets the rate at which Swiss tables will be sampled.
+int32_t GetHashtablezSampleParameter();
 void SetHashtablezSampleParameter(int32_t rate);
+void SetHashtablezSampleParameterInternal(int32_t rate);
 
 // Sets a soft max for the number of samples that will be kept.
-void SetHashtablezMaxSamples(int32_t max);
+size_t GetHashtablezMaxSamples();
+void SetHashtablezMaxSamples(size_t max);
+void SetHashtablezMaxSamplesInternal(size_t max);
 
 // Configuration override.
 // This allows process-wide sampling without depending on order of
 // initialization of static storage duration objects.
 // The definition of this function is weak, which allows us to inject a
 // different definition for it at link time.
-extern "C" bool AbslContainerInternalSampleEverything();
+extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
 
 }  // namespace container_internal
 ABSL_NAMESPACE_END
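
With the handle no longer cleaning up in a destructor, a container creates it with the slot size and must call Unregister() explicitly before the handle goes away. A simplified usage sketch against the internal header; ToyTable and Slot are hypothetical, and the real call sites live in raw_hash_set.h:

    #include <cstddef>

    #include "absl/container/internal/hashtablez_sampler.h"

    namespace example {

    struct Slot { int key; int value; };  // hypothetical slot type

    class ToyTable {
     public:
      // Ask the sampler whether this table should be profiled, passing the
      // inline element size that now travels with every sample.
      ToyTable() : infoz_(absl::container_internal::Sample(sizeof(Slot))) {}

      // No destructor on the handle any more: unregister explicitly.
      ~ToyTable() { infoz_.Unregister(); }

      void reserve(std::size_t n) {
        infoz_.RecordReservation(n);
        // ...grow the backing storage...
      }

      void insert(std::size_t hash, std::size_t distance_from_desired) {
        infoz_.RecordInsert(hash, distance_from_desired);
        // ...actual insertion...
      }

     private:
      absl::container_internal::HashtablezInfoHandle infoz_;
    };

    }  // namespace example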
diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
index 78b9d36..ed35a7e 100644
--- a/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
+++ b/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -21,7 +21,8 @@
 namespace container_internal {
 
 // See hashtablez_sampler.h for details.
-extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() {
+extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
+    AbslContainerInternalSampleEverything)() {
   return false;
 }
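
Because the definition above is weak, a program can force sampling of every table by linking in a strong definition of the same C symbol. A sketch, assuming ABSL_INTERNAL_C_SYMBOL is available via absl/base/config.h:

    #include "absl/base/config.h"

    // Strong definition: the linker prefers it over the weak default above,
    // which makes ShouldForceSampling() return true process-wide.
    extern "C" bool ABSL_INTERNAL_C_SYMBOL(
        AbslContainerInternalSampleEverything)() {
      return true;
    }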
 
diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
index 8d10a1e..665d518 100644
--- a/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
+++ b/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
@@ -21,7 +21,8 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
+#include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/blocking_counter.h"
 #include "absl/synchronization/internal/thread_pool.h"
 #include "absl/synchronization/mutex.h"
@@ -29,7 +30,7 @@
 #include "absl/time/clock.h"
 #include "absl/time/time.h"
 
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
 constexpr int kProbeLength = 16;
 #else
 constexpr int kProbeLength = 8;
@@ -69,7 +70,9 @@
 }
 
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
-  auto* info = s->Register();
+  const int64_t test_stride = 123;
+  const size_t test_element_size = 17;
+  auto* info = s->Register(test_stride, test_element_size);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -77,9 +80,11 @@
 
 TEST(HashtablezInfoTest, PrepareForSampling) {
   absl::Time test_start = absl::Now();
+  const int64_t test_stride = 123;
+  const size_t test_element_size = 17;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_stride, test_element_size);
 
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
@@ -89,7 +94,11 @@
   EXPECT_EQ(info.total_probe_length.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+  EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
+  EXPECT_EQ(info.weight, test_stride);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 
   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -98,9 +107,11 @@
   info.total_probe_length.store(1, std::memory_order_relaxed);
   info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
   info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+  info.hashes_bitwise_xor.store(1, std::memory_order_relaxed);
+  info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_stride * 2, test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -109,13 +120,19 @@
   EXPECT_EQ(info.total_probe_length.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+  EXPECT_EQ(info.max_reserve.load(), 0);
+  EXPECT_EQ(info.weight, 2 * test_stride);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
 
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const int64_t test_stride = 21;
+  const size_t test_element_size = 19;
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -127,26 +144,33 @@
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const int64_t test_stride = 25;
+  const size_t test_element_size = 23;
+  info.PrepareForSampling(test_stride, test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
   EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00);
   RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
   EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00);
   RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 12);
   EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00);
 }
 
 TEST(HashtablezInfoTest, RecordErase) {
+  const int64_t test_stride = 31;
+  const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_stride, test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -154,12 +178,15 @@
   RecordEraseSlow(&info);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
+  const int64_t test_stride = 33;
+  const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -178,43 +205,67 @@
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
+}
+
+TEST(HashtablezInfoTest, RecordReservation) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 35;
+  const size_t test_element_size = 33;
+  info.PrepareForSampling(test_stride, test_element_size);
+  RecordReservationSlow(&info, 3);
+  EXPECT_EQ(info.max_reserve.load(), 3);
+
+  RecordReservationSlow(&info, 2);
+  // High watermark does not change
+  EXPECT_EQ(info.max_reserve.load(), 3);
+
+  RecordReservationSlow(&info, 10);
+  // High watermark does change
+  EXPECT_EQ(info.max_reserve.load(), 10);
 }
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 TEST(HashtablezSamplerTest, SmallSampleParameter) {
+  const size_t test_element_size = 31;
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
 
   for (int i = 0; i < 1000; ++i) {
-    int64_t next_sample = 0;
-    HashtablezInfo* sample = SampleSlow(&next_sample);
-    EXPECT_GT(next_sample, 0);
+    SamplingState next_sample = {0, 0};
+    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    EXPECT_GT(next_sample.next_sample, 0);
+    EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
     UnsampleSlow(sample);
   }
 }
 
 TEST(HashtablezSamplerTest, LargeSampleParameter) {
+  const size_t test_element_size = 31;
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
 
   for (int i = 0; i < 1000; ++i) {
-    int64_t next_sample = 0;
-    HashtablezInfo* sample = SampleSlow(&next_sample);
-    EXPECT_GT(next_sample, 0);
+    SamplingState next_sample = {0, 0};
+    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    EXPECT_GT(next_sample.next_sample, 0);
+    EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
     UnsampleSlow(sample);
   }
 }
 
 TEST(HashtablezSamplerTest, Sample) {
+  const size_t test_element_size = 31;
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
   int64_t num_sampled = 0;
   int64_t total = 0;
   double sample_rate = 0.0;
   for (int i = 0; i < 1000000; ++i) {
-    HashtablezInfoHandle h = Sample();
+    HashtablezInfoHandle h = Sample(test_element_size);
     ++total;
     if (HashtablezInfoHandlePeer::IsSampled(h)) {
       ++num_sampled;
@@ -226,14 +277,17 @@
 }
 
 TEST(HashtablezSamplerTest, Handle) {
-  auto& sampler = HashtablezSampler::Global();
-  HashtablezInfoHandle h(sampler.Register());
+  auto& sampler = GlobalHashtablezSampler();
+  const int64_t test_stride = 41;
+  const size_t test_element_size = 39;
+  HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
   bool found = false;
   sampler.Iterate([&](const HashtablezInfo& h) {
     if (&h == info) {
+      EXPECT_EQ(h.weight, test_stride);
       EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
       found = true;
     }
@@ -299,18 +353,20 @@
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    const int64_t sampling_stride = 11 + i % 3;
+    const size_t elt_size = 10 + i % 2;
+    pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(sampling_stride, elt_size));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(sampling_stride, elt_size));
             break;
           }
           case 1: {
@@ -319,6 +375,7 @@
             HashtablezInfo* info = infoz[p];
             infoz[p] = infoz.back();
             infoz.pop_back();
+            EXPECT_EQ(info->weight, sampling_stride);
             sampler.Unregister(info);
             break;
           }
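
The weight checks above are what make the samples usable as population estimates: each sampled table stands in for weight-many unsampled ones. A sketch of how a consumer could fold that in while iterating; the aggregation itself is illustrative, not an Abseil API:

    #include <atomic>
    #include <cstdint>

    #include "absl/container/internal/hashtablez_sampler.h"

    // Rough estimate of elements across all live hashtables: scale each
    // sample's current size by the stride (weight) it was sampled with.
    int64_t EstimateTotalElements() {
      int64_t estimate = 0;
      absl::container_internal::GlobalHashtablezSampler().Iterate(
          [&](const absl::container_internal::HashtablezInfo& info) {
            estimate += info.weight *
                        static_cast<int64_t>(
                            info.size.load(std::memory_order_relaxed));
          });
      return estimate;
    }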
diff --git a/abseil-cpp/absl/container/internal/have_sse.h b/abseil-cpp/absl/container/internal/have_sse.h
deleted file mode 100644
index e75e1a1..0000000
--- a/abseil-cpp/absl/container/internal/have_sse.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Shared config probing for SSE instructions used in Swiss tables.
-#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-
-#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#if defined(__SSE2__) ||  \
-    (defined(_MSC_VER) && \
-     (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
-#else
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
-#endif
-#endif
-
-#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#ifdef __SSSE3__
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
-#else
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
-#endif
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
-    !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#endif  // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
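
With have_sse.h deleted, callers key off the target-feature macros from absl/base/config.h and test them with #ifdef rather than #if on a 0/1 value. The probe-length scaling used in the sampler and its test reduces to:

    #include <cstddef>

    #include "absl/base/config.h"

    // Probe lengths are recorded in group units: 16 slots per group when the
    // SSE2 control-byte path is in use, 8 otherwise.
    inline std::size_t ScaleProbeLength(std::size_t distance_from_desired) {
    #ifdef ABSL_INTERNAL_HAVE_SSE2
      return distance_from_desired / 16;
    #else
      return distance_from_desired / 8;
    #endif
    }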
diff --git a/abseil-cpp/absl/container/internal/inlined_vector.h b/abseil-cpp/absl/container/internal/inlined_vector.h
index 4d80b72..b2a602d 100644
--- a/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
-#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
 
 #include <algorithm>
 #include <cstddef>
@@ -21,8 +21,11 @@
 #include <iterator>
 #include <limits>
 #include <memory>
+#include <new>
+#include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/macros.h"
 #include "absl/container/internal/compressed_tuple.h"
 #include "absl/memory/memory.h"
@@ -33,96 +36,146 @@
 ABSL_NAMESPACE_BEGIN
 namespace inlined_vector_internal {
 
+// GCC does not deal very well with the below code
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
+
+template <typename A>
+using AllocatorTraits = std::allocator_traits<A>;
+template <typename A>
+using ValueType = typename AllocatorTraits<A>::value_type;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using Pointer = typename AllocatorTraits<A>::pointer;
+template <typename A>
+using ConstPointer = typename AllocatorTraits<A>::const_pointer;
+template <typename A>
+using DifferenceType = typename AllocatorTraits<A>::difference_type;
+template <typename A>
+using Reference = ValueType<A>&;
+template <typename A>
+using ConstReference = const ValueType<A>&;
+template <typename A>
+using Iterator = Pointer<A>;
+template <typename A>
+using ConstIterator = ConstPointer<A>;
+template <typename A>
+using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
+template <typename A>
+using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
+template <typename A>
+using MoveIterator = typename std::move_iterator<Iterator<A>>;
+
 template <typename Iterator>
 using IsAtLeastForwardIterator = std::is_convertible<
     typename std::iterator_traits<Iterator>::iterator_category,
     std::forward_iterator_tag>;
 
-template <typename AllocatorType,
-          typename ValueType =
-              typename absl::allocator_traits<AllocatorType>::value_type>
-using IsMemcpyOk =
-    absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>,
-                      absl::is_trivially_copy_constructible<ValueType>,
-                      absl::is_trivially_copy_assignable<ValueType>,
-                      absl::is_trivially_destructible<ValueType>>;
+template <typename A>
+using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
+template <typename A>
+using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
 
-template <typename AllocatorType, typename Pointer, typename SizeType>
-void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
-                     SizeType destroy_size) {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+template <typename T>
+struct TypeIdentity {
+  using type = T;
+};
 
-  if (destroy_first != nullptr) {
-    for (auto i = destroy_size; i != 0;) {
+// Used for function arguments in template functions to prevent ADL by forcing
+// callers to explicitly specify the template parameter.
+template <typename T>
+using NoTypeDeduction = typename TypeIdentity<T>::type;
+
+template <typename A, bool IsTriviallyDestructible =
+                          absl::is_trivially_destructible<ValueType<A>>::value>
+struct DestroyAdapter;
+
+template <typename A>
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ false> {
+  static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+                              SizeType<A> destroy_size) {
+    for (SizeType<A> i = destroy_size; i != 0;) {
       --i;
-      AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
+      AllocatorTraits<A>::destroy(allocator, destroy_first + i);
     }
-
-#if !defined(NDEBUG)
-    {
-      using ValueType = typename AllocatorTraits::value_type;
-
-      // Overwrite unused memory with `0xab` so we can catch uninitialized
-      // usage.
-      //
-      // Cast to `void*` to tell the compiler that we don't care that we might
-      // be scribbling on a vtable pointer.
-      void* memory_ptr = destroy_first;
-      auto memory_size = destroy_size * sizeof(ValueType);
-      std::memset(memory_ptr, 0xab, memory_size);
-    }
-#endif  // !defined(NDEBUG)
   }
-}
+};
 
-template <typename AllocatorType, typename Pointer, typename ValueAdapter,
-          typename SizeType>
-void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
-                       ValueAdapter* values_ptr, SizeType construct_size) {
-  for (SizeType i = 0; i < construct_size; ++i) {
-    ABSL_INTERNAL_TRY {
-      values_ptr->ConstructNext(alloc_ptr, construct_first + i);
-    }
+template <typename A>
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ true> {
+  static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+                              SizeType<A> destroy_size) {
+    static_cast<void>(allocator);
+    static_cast<void>(destroy_first);
+    static_cast<void>(destroy_size);
+  }
+};
+
+template <typename A>
+struct Allocation {
+  Pointer<A> data = nullptr;
+  SizeType<A> capacity = 0;
+};
+
+template <typename A,
+          bool IsOverAligned =
+              (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+  static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+    return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+            requested_capacity};
+  }
+
+  static void Deallocate(A& allocator, Pointer<A> pointer,
+                         SizeType<A> capacity) {
+    AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+  }
+};
+
+template <typename A, typename ValueAdapter>
+void ConstructElements(NoTypeDeduction<A>& allocator,
+                       Pointer<A> construct_first, ValueAdapter& values,
+                       SizeType<A> construct_size) {
+  for (SizeType<A> i = 0; i < construct_size; ++i) {
+    ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
     ABSL_INTERNAL_CATCH_ANY {
-      inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+      DestroyAdapter<A>::DestroyElements(allocator, construct_first, i);
       ABSL_INTERNAL_RETHROW;
     }
   }
 }
 
-template <typename Pointer, typename ValueAdapter, typename SizeType>
-void AssignElements(Pointer assign_first, ValueAdapter* values_ptr,
-                    SizeType assign_size) {
-  for (SizeType i = 0; i < assign_size; ++i) {
-    values_ptr->AssignNext(assign_first + i);
+template <typename A, typename ValueAdapter>
+void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
+                    SizeType<A> assign_size) {
+  for (SizeType<A> i = 0; i < assign_size; ++i) {
+    values.AssignNext(assign_first + i);
   }
 }
 
-template <typename AllocatorType>
+template <typename A>
 struct StorageView {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-  using Pointer = typename AllocatorTraits::pointer;
-  using SizeType = typename AllocatorTraits::size_type;
-
-  Pointer data;
-  SizeType size;
-  SizeType capacity;
+  Pointer<A> data;
+  SizeType<A> size;
+  SizeType<A> capacity;
 };
 
-template <typename AllocatorType, typename Iterator>
+template <typename A, typename Iterator>
 class IteratorValueAdapter {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-  using Pointer = typename AllocatorTraits::pointer;
-
  public:
   explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
 
-  void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
-    AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+  void ConstructNext(A& allocator, Pointer<A> construct_at) {
+    AllocatorTraits<A>::construct(allocator, construct_at, *it_);
     ++it_;
   }
 
-  void AssignNext(Pointer assign_at) {
+  void AssignNext(Pointer<A> assign_at) {
     *assign_at = *it_;
     ++it_;
   }
@@ -131,166 +184,162 @@
   Iterator it_;
 };
 
-template <typename AllocatorType>
+template <typename A>
 class CopyValueAdapter {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-  using ValueType = typename AllocatorTraits::value_type;
-  using Pointer = typename AllocatorTraits::pointer;
-  using ConstPointer = typename AllocatorTraits::const_pointer;
-
  public:
-  explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {}
+  explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
 
-  void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
-    AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+  void ConstructNext(A& allocator, Pointer<A> construct_at) {
+    AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
   }
 
-  void AssignNext(Pointer assign_at) { *assign_at = *ptr_; }
+  void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
 
  private:
-  ConstPointer ptr_;
+  ConstPointer<A> ptr_;
 };
 
-template <typename AllocatorType>
+template <typename A>
 class DefaultValueAdapter {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-  using ValueType = typename AllocatorTraits::value_type;
-  using Pointer = typename AllocatorTraits::pointer;
-
  public:
   explicit DefaultValueAdapter() {}
 
-  void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
-    AllocatorTraits::construct(*alloc_ptr, construct_at);
+  void ConstructNext(A& allocator, Pointer<A> construct_at) {
+    AllocatorTraits<A>::construct(allocator, construct_at);
   }
 
-  void AssignNext(Pointer assign_at) { *assign_at = ValueType(); }
+  void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
 };
 
-template <typename AllocatorType>
+template <typename A>
 class AllocationTransaction {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-  using Pointer = typename AllocatorTraits::pointer;
-  using SizeType = typename AllocatorTraits::size_type;
-
  public:
-  explicit AllocationTransaction(AllocatorType* alloc_ptr)
-      : alloc_data_(*alloc_ptr, nullptr) {}
+  explicit AllocationTransaction(A& allocator)
+      : allocator_data_(allocator, nullptr), capacity_(0) {}
 
   ~AllocationTransaction() {
     if (DidAllocate()) {
-      AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+      MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
     }
   }
 
   AllocationTransaction(const AllocationTransaction&) = delete;
   void operator=(const AllocationTransaction&) = delete;
 
-  AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
-  Pointer& GetData() { return alloc_data_.template get<1>(); }
-  SizeType& GetCapacity() { return capacity_; }
+  A& GetAllocator() { return allocator_data_.template get<0>(); }
+  Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+  SizeType<A>& GetCapacity() { return capacity_; }
 
   bool DidAllocate() { return GetData() != nullptr; }
-  Pointer Allocate(SizeType capacity) {
-    GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
-    GetCapacity() = capacity;
-    return GetData();
+
+  Pointer<A> Allocate(SizeType<A> requested_capacity) {
+    Allocation<A> result =
+        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+    GetData() = result.data;
+    GetCapacity() = result.capacity;
+    return result.data;
   }
 
+  ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+    Allocation<A> result = {GetData(), GetCapacity()};
+    Reset();
+    return result;
+  }
+
+ private:
   void Reset() {
     GetData() = nullptr;
     GetCapacity() = 0;
   }
 
- private:
-  container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
-  SizeType capacity_ = 0;
+  container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+  SizeType<A> capacity_;
 };
 
-template <typename AllocatorType>
+template <typename A>
 class ConstructionTransaction {
-  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-  using Pointer = typename AllocatorTraits::pointer;
-  using SizeType = typename AllocatorTraits::size_type;
-
  public:
-  explicit ConstructionTransaction(AllocatorType* alloc_ptr)
-      : alloc_data_(*alloc_ptr, nullptr) {}
+  explicit ConstructionTransaction(A& allocator)
+      : allocator_data_(allocator, nullptr), size_(0) {}
 
   ~ConstructionTransaction() {
     if (DidConstruct()) {
-      inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
-                                               GetData(), GetSize());
+      DestroyAdapter<A>::DestroyElements(GetAllocator(), GetData(), GetSize());
     }
   }
 
   ConstructionTransaction(const ConstructionTransaction&) = delete;
   void operator=(const ConstructionTransaction&) = delete;
 
-  AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
-  Pointer& GetData() { return alloc_data_.template get<1>(); }
-  SizeType& GetSize() { return size_; }
+  A& GetAllocator() { return allocator_data_.template get<0>(); }
+  Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+  SizeType<A>& GetSize() { return size_; }
 
   bool DidConstruct() { return GetData() != nullptr; }
   template <typename ValueAdapter>
-  void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
-    inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
-                                               data, values_ptr, size);
+  void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
+    ConstructElements<A>(GetAllocator(), data, values, size);
     GetData() = data;
     GetSize() = size;
   }
-  void Commit() {
+  void Commit() && {
     GetData() = nullptr;
     GetSize() = 0;
   }
 
  private:
-  container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
-  SizeType size_ = 0;
+  container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+  SizeType<A> size_;
 };
 
 template <typename T, size_t N, typename A>
 class Storage {
  public:
-  using AllocatorTraits = absl::allocator_traits<A>;
-  using allocator_type = typename AllocatorTraits::allocator_type;
-  using value_type = typename AllocatorTraits::value_type;
-  using pointer = typename AllocatorTraits::pointer;
-  using const_pointer = typename AllocatorTraits::const_pointer;
-  using size_type = typename AllocatorTraits::size_type;
-  using difference_type = typename AllocatorTraits::difference_type;
+  struct MemcpyPolicy {};
+  struct ElementwiseAssignPolicy {};
+  struct ElementwiseSwapPolicy {};
+  struct ElementwiseConstructPolicy {};
 
-  using reference = value_type&;
-  using const_reference = const value_type&;
-  using RValueReference = value_type&&;
-  using iterator = pointer;
-  using const_iterator = const_pointer;
-  using reverse_iterator = std::reverse_iterator<iterator>;
-  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-  using MoveIterator = std::move_iterator<iterator>;
-  using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
+  using MoveAssignmentPolicy = absl::conditional_t<
+      // Fast path: if the value type can be trivially move assigned and
+      // destroyed, and we know the allocator doesn't do anything fancy, then
+      // it's safe for us to simply adopt the contents of the storage for
+      // `other` and remove its own reference to them. It's as if we had
+      // individually move-assigned each value and then destroyed the original.
+      absl::conjunction<absl::is_trivially_move_assignable<ValueType<A>>,
+                        absl::is_trivially_destructible<ValueType<A>>,
+                        std::is_same<A, std::allocator<ValueType<A>>>>::value,
+      MemcpyPolicy,
+      // Otherwise we use move assignment if possible. If not, we simulate
+      // move assignment using move construction.
+      //
+      // Note that this is in contrast to e.g. std::vector and std::optional,
+      // which are themselves not move-assignable when their contained type is
+      // not.
+      absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
+                          ElementwiseConstructPolicy>>;
 
-  using StorageView = inlined_vector_internal::StorageView<allocator_type>;
+  // The policy to be used specifically when swapping inlined elements.
+  using SwapInlinedElementsPolicy = absl::conditional_t<
+      // Fast path: if the value type can be trivially move constructed/assigned
+      // and destroyed, and we know the allocator doesn't do anything fancy,
+      // then it's safe for us to simply swap the bytes in the inline storage.
+      // It's as if we had move-constructed a temporary vector, move-assigned
+      // one to the other, then move-assigned the first from the temporary.
+      absl::conjunction<absl::is_trivially_move_constructible<ValueType<A>>,
+                        absl::is_trivially_move_assignable<ValueType<A>>,
+                        absl::is_trivially_destructible<ValueType<A>>,
+                        std::is_same<A, std::allocator<ValueType<A>>>>::value,
+      MemcpyPolicy,
+      absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
+                          ElementwiseConstructPolicy>>;
 
-  template <typename Iterator>
-  using IteratorValueAdapter =
-      inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
-  using CopyValueAdapter =
-      inlined_vector_internal::CopyValueAdapter<allocator_type>;
-  using DefaultValueAdapter =
-      inlined_vector_internal::DefaultValueAdapter<allocator_type>;
-
-  using AllocationTransaction =
-      inlined_vector_internal::AllocationTransaction<allocator_type>;
-  using ConstructionTransaction =
-      inlined_vector_internal::ConstructionTransaction<allocator_type>;
-
-  static size_type NextCapacity(size_type current_capacity) {
+  static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
     return current_capacity * 2;
   }
 
-  static size_type ComputeCapacity(size_type current_capacity,
-                                   size_type requested_capacity) {
+  static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
+                                     SizeType<A> requested_capacity) {
     return (std::max)(NextCapacity(current_capacity), requested_capacity);
   }
 
@@ -298,140 +347,175 @@
   // Storage Constructors and Destructor
   // ---------------------------------------------------------------------------
 
-  Storage() : metadata_() {}
+  Storage() : metadata_(A(), /* size and is_allocated */ 0u) {}
 
-  explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {}
+  explicit Storage(const A& allocator)
+      : metadata_(allocator, /* size and is_allocated */ 0u) {}
 
   ~Storage() {
-    pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
-    DeallocateIfAllocated();
+    // Fast path: if we are empty and not allocated, there's nothing to do.
+    if (GetSizeAndIsAllocated() == 0) {
+      return;
+    }
+
+    // Fast path: if no destructors need to be run and we know the allocator
+    // doesn't do anything fancy, then all we need to do is deallocate (and
+    // maybe not even that).
+    if (absl::is_trivially_destructible<ValueType<A>>::value &&
+        std::is_same<A, std::allocator<ValueType<A>>>::value) {
+      DeallocateIfAllocated();
+      return;
+    }
+
+    DestroyContents();
   }
 
   // ---------------------------------------------------------------------------
   // Storage Member Accessors
   // ---------------------------------------------------------------------------
 
-  size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+  SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
 
-  const size_type& GetSizeAndIsAllocated() const {
+  const SizeType<A>& GetSizeAndIsAllocated() const {
     return metadata_.template get<1>();
   }
 
-  size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+  SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
 
   bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
 
-  pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+  Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
 
-  const_pointer GetAllocatedData() const {
+  ConstPointer<A> GetAllocatedData() const {
     return data_.allocated.allocated_data;
   }
 
-  pointer GetInlinedData() {
-    return reinterpret_cast<pointer>(
-        std::addressof(data_.inlined.inlined_data[0]));
+  // ABSL_ATTRIBUTE_NO_SANITIZE_CFI is used because the memory pointed to may be
+  // uninitialized, a common pattern in allocate()+construct() APIs.
+  // https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking
+  // NOTE: When this was written, LLVM documentation did not explicitly
+  // mention that casting `char*` and using `reinterpret_cast` qualifies
+  // as a bad cast.
+  ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer<A> GetInlinedData() {
+    return reinterpret_cast<Pointer<A>>(data_.inlined.inlined_data);
   }
 
-  const_pointer GetInlinedData() const {
-    return reinterpret_cast<const_pointer>(
-        std::addressof(data_.inlined.inlined_data[0]));
+  ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer<A> GetInlinedData() const {
+    return reinterpret_cast<ConstPointer<A>>(data_.inlined.inlined_data);
   }
 
-  size_type GetAllocatedCapacity() const {
+  SizeType<A> GetAllocatedCapacity() const {
     return data_.allocated.allocated_capacity;
   }
 
-  size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
-
-  StorageView MakeStorageView() {
-    return GetIsAllocated()
-               ? StorageView{GetAllocatedData(), GetSize(),
-                             GetAllocatedCapacity()}
-               : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+  SizeType<A> GetInlinedCapacity() const {
+    return static_cast<SizeType<A>>(kOptimalInlinedSize);
   }
 
-  allocator_type* GetAllocPtr() {
-    return std::addressof(metadata_.template get<0>());
+  StorageView<A> MakeStorageView() {
+    return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
+                                             GetAllocatedCapacity()}
+                            : StorageView<A>{GetInlinedData(), GetSize(),
+                                             GetInlinedCapacity()};
   }
 
-  const allocator_type* GetAllocPtr() const {
-    return std::addressof(metadata_.template get<0>());
-  }
+  A& GetAllocator() { return metadata_.template get<0>(); }
+
+  const A& GetAllocator() const { return metadata_.template get<0>(); }
 
   // ---------------------------------------------------------------------------
   // Storage Member Mutators
   // ---------------------------------------------------------------------------
 
-  template <typename ValueAdapter>
-  void Initialize(ValueAdapter values, size_type new_size);
+  ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
 
   template <typename ValueAdapter>
-  void Assign(ValueAdapter values, size_type new_size);
+  void Initialize(ValueAdapter values, SizeType<A> new_size);
 
   template <typename ValueAdapter>
-  void Resize(ValueAdapter values, size_type new_size);
+  void Assign(ValueAdapter values, SizeType<A> new_size);
 
   template <typename ValueAdapter>
-  iterator Insert(const_iterator pos, ValueAdapter values,
-                  size_type insert_count);
+  void Resize(ValueAdapter values, SizeType<A> new_size);
+
+  template <typename ValueAdapter>
+  Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
+                     SizeType<A> insert_count);
 
   template <typename... Args>
-  reference EmplaceBack(Args&&... args);
+  Reference<A> EmplaceBack(Args&&... args);
 
-  iterator Erase(const_iterator from, const_iterator to);
+  Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
 
-  void Reserve(size_type requested_capacity);
+  void Reserve(SizeType<A> requested_capacity);
 
   void ShrinkToFit();
 
   void Swap(Storage* other_storage_ptr);
 
   void SetIsAllocated() {
-    GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+    GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
   }
 
   void UnsetIsAllocated() {
-    GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+    GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
   }
 
-  void SetSize(size_type size) {
+  void SetSize(SizeType<A> size) {
     GetSizeAndIsAllocated() =
-        (size << 1) | static_cast<size_type>(GetIsAllocated());
+        (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
   }
 
-  void SetAllocatedSize(size_type size) {
-    GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+  void SetAllocatedSize(SizeType<A> size) {
+    GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
   }
 
-  void SetInlinedSize(size_type size) {
-    GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+  void SetInlinedSize(SizeType<A> size) {
+    GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
   }
 
-  void AddSize(size_type count) {
-    GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+  void AddSize(SizeType<A> count) {
+    GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
   }
 
-  void SubtractSize(size_type count) {
-    assert(count <= GetSize());
+  void SubtractSize(SizeType<A> count) {
+    ABSL_HARDENING_ASSERT(count <= GetSize());
 
-    GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
+    GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
   }
 
-  void SetAllocatedData(pointer data, size_type capacity) {
-    data_.allocated.allocated_data = data;
-    data_.allocated.allocated_capacity = capacity;
-  }
-
-  void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
-    SetAllocatedData(allocation_tx_ptr->GetData(),
-                     allocation_tx_ptr->GetCapacity());
-
-    allocation_tx_ptr->Reset();
+  void SetAllocation(Allocation<A> allocation) {
+    data_.allocated.allocated_data = allocation.data;
+    data_.allocated.allocated_capacity = allocation.capacity;
   }
 
   void MemcpyFrom(const Storage& other_storage) {
-    assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+    // Assumption check: it doesn't make sense to memcpy inlined elements unless
+    // we know the allocator doesn't do anything fancy, and one of the following
+    // holds:
+    //
+    //  *  The elements are trivially relocatable.
+    //
+    //  *  It's possible to trivially assign the elements and then destroy the
+    //     source.
+    //
+    //  *  It's possible to trivially copy construct/assign the elements.
+    //
+    {
+      using V = ValueType<A>;
+      ABSL_HARDENING_ASSERT(
+          other_storage.GetIsAllocated() ||
+          (std::is_same<A, std::allocator<V>>::value &&
+           (
+               // First case above
+               absl::is_trivially_relocatable<V>::value ||
+               // Second case above
+               (absl::is_trivially_move_assignable<V>::value &&
+                absl::is_trivially_destructible<V>::value) ||
+               // Third case above
+               (absl::is_trivially_copy_constructible<V>::value ||
+                absl::is_trivially_copy_assignable<V>::value))));
+    }
 
     GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
     data_ = other_storage.data_;
@@ -439,22 +523,30 @@
 
   void DeallocateIfAllocated() {
     if (GetIsAllocated()) {
-      AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
-                                  GetAllocatedCapacity());
+      MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+                                   GetAllocatedCapacity());
     }
   }
 
  private:
-  using Metadata =
-      container_internal::CompressedTuple<allocator_type, size_type>;
+  ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
+
+  using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
 
   struct Allocated {
-    pointer allocated_data;
-    size_type allocated_capacity;
+    Pointer<A> allocated_data;
+    SizeType<A> allocated_capacity;
   };
 
+  // `kOptimalInlinedSize` is an automatically adjusted inlined capacity of the
+  // `InlinedVector`. Sometimes, it is possible to increase the capacity (from
+  // the user-requested `N`) without increasing the size of the `InlinedVector`.
+  static constexpr size_t kOptimalInlinedSize =
+      (std::max)(N, sizeof(Allocated) / sizeof(ValueType<A>));
+
   struct Inlined {
-    alignas(value_type) char inlined_data[sizeof(value_type[N])];
+    alignas(ValueType<A>) char inlined_data[sizeof(
+        ValueType<A>[kOptimalInlinedSize])];
   };
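// Worked example for `kOptimalInlinedSize` (editorial illustration; assumes a
// typical 64-bit target where `Allocated` is a pointer plus a size, 16 bytes):
// for `InlinedVector<int64_t, 1>` the formula gives max(1, 16 / 8) == 2, so two
// elements fit inline in the space the heap-backed representation needs anyway.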
 
   union Data {
@@ -462,33 +554,88 @@
     Inlined inlined;
   };
 
+  void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType<A> n);
+  void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType<A> n);
+
+  void SwapInlinedElements(MemcpyPolicy, Storage* other);
+  template <typename NotMemcpyPolicy>
+  void SwapInlinedElements(NotMemcpyPolicy, Storage* other);
+
+  template <typename... Args>
+  ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
+
   Metadata metadata_;
   Data data_;
 };
 
 template <typename T, size_t N, typename A>
+void Storage<T, N, A>::DestroyContents() {
+  Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+  DestroyAdapter<A>::DestroyElements(GetAllocator(), data, GetSize());
+  DeallocateIfAllocated();
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::InitFrom(const Storage& other) {
+  const SizeType<A> n = other.GetSize();
+  ABSL_HARDENING_ASSERT(n > 0);  // Empty sources are handled in the caller.
+  ConstPointer<A> src;
+  Pointer<A> dst;
+  if (!other.GetIsAllocated()) {
+    dst = GetInlinedData();
+    src = other.GetInlinedData();
+  } else {
+    // Because this is only called from the `InlinedVector` constructors, it's
+    // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+    // throws, deallocation will be automatically handled by `~Storage()`.
+    SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+    Allocation<A> allocation =
+        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+    SetAllocation(allocation);
+    dst = allocation.data;
+    src = other.GetAllocatedData();
+  }
+
+  // Fast path: if the value type is trivially copy constructible and we know
+  // the allocator doesn't do anything fancy, then we know it is legal for us to
+  // simply memcpy the other vector's elements.
+  if (absl::is_trivially_copy_constructible<ValueType<A>>::value &&
+      std::is_same<A, std::allocator<ValueType<A>>>::value) {
+    std::memcpy(reinterpret_cast<char*>(dst),
+                reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
+  } else {
+    auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
+    ConstructElements<A>(GetAllocator(), dst, values, n);
+  }
+
+  GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
     -> void {
   // Only callable from constructors!
-  assert(!GetIsAllocated());
-  assert(GetSize() == 0);
+  ABSL_HARDENING_ASSERT(!GetIsAllocated());
+  ABSL_HARDENING_ASSERT(GetSize() == 0);
 
-  pointer construct_data;
+  Pointer<A> construct_data;
   if (new_size > GetInlinedCapacity()) {
     // Because this is only called from the `InlinedVector` constructors, it's
     // safe to take on the allocation with size `0`. If `ConstructElements(...)`
     // throws, deallocation will be automatically handled by `~Storage()`.
-    size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
-    construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
-    SetAllocatedData(construct_data, new_capacity);
+    SizeType<A> requested_capacity =
+        ComputeCapacity(GetInlinedCapacity(), new_size);
+    Allocation<A> allocation =
+        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+    construct_data = allocation.data;
+    SetAllocation(allocation);
     SetIsAllocated();
   } else {
     construct_data = GetInlinedData();
   }
 
-  inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
-                                             &values, new_size);
+  ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
 
   // Since the initial size was guaranteed to be `0` and the allocated bit is
   // already correct for either case, *adding* `new_size` gives us the correct
@@ -498,18 +645,20 @@
 
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
-  StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
+    -> void {
+  StorageView<A> storage_view = MakeStorageView();
 
-  AllocationTransaction allocation_tx(GetAllocPtr());
+  AllocationTransaction<A> allocation_tx(GetAllocator());
 
-  absl::Span<value_type> assign_loop;
-  absl::Span<value_type> construct_loop;
-  absl::Span<value_type> destroy_loop;
+  absl::Span<ValueType<A>> assign_loop;
+  absl::Span<ValueType<A>> construct_loop;
+  absl::Span<ValueType<A>> destroy_loop;
 
   if (new_size > storage_view.capacity) {
-    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+    SizeType<A> requested_capacity =
+        ComputeCapacity(storage_view.capacity, new_size);
+    construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
     destroy_loop = {storage_view.data, storage_view.size};
   } else if (new_size > storage_view.size) {
     assign_loop = {storage_view.data, storage_view.size};
@@ -520,18 +669,17 @@
     destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
   }
 
-  inlined_vector_internal::AssignElements(assign_loop.data(), &values,
-                                          assign_loop.size());
+  AssignElements<A>(assign_loop.data(), values, assign_loop.size());
 
-  inlined_vector_internal::ConstructElements(
-      GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+  ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
+                       construct_loop.size());
 
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
-                                           destroy_loop.size());
+  DestroyAdapter<A>::DestroyElements(GetAllocator(), destroy_loop.data(),
+                                     destroy_loop.size());
 
   if (allocation_tx.DidAllocate()) {
     DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
     SetIsAllocated();
   }
 
@@ -540,125 +688,120 @@
 
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
-  StorageView storage_view = MakeStorageView();
-
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
-
-  AllocationTransaction allocation_tx(GetAllocPtr());
-  ConstructionTransaction construction_tx(GetAllocPtr());
-
-  absl::Span<value_type> construct_loop;
-  absl::Span<value_type> move_construct_loop;
-  absl::Span<value_type> destroy_loop;
-
-  if (new_size > storage_view.capacity) {
-    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    pointer new_data = allocation_tx.Allocate(new_capacity);
-    construct_loop = {new_data + storage_view.size,
-                      new_size - storage_view.size};
-    move_construct_loop = {new_data, storage_view.size};
-    destroy_loop = {storage_view.data, storage_view.size};
-  } else if (new_size > storage_view.size) {
-    construct_loop = {storage_view.data + storage_view.size,
-                      new_size - storage_view.size};
+auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
+    -> void {
+  StorageView<A> storage_view = MakeStorageView();
+  Pointer<A> const base = storage_view.data;
+  const SizeType<A> size = storage_view.size;
+  A& alloc = GetAllocator();
+  if (new_size <= size) {
+    // Destroy extra old elements.
+    DestroyAdapter<A>::DestroyElements(alloc, base + new_size, size - new_size);
+  } else if (new_size <= storage_view.capacity) {
+    // Construct new elements in place.
+    ConstructElements<A>(alloc, base + size, values, new_size - size);
   } else {
-    destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
-  }
+    // Steps:
+    //  a. Allocate new backing store.
+    //  b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to new backing store.
+    //  d. Destroy all elements in old backing store.
+    // Use transactional wrappers for the first two steps so we can roll
+    // back if necessary due to exceptions.
+    AllocationTransaction<A> allocation_tx(alloc);
+    SizeType<A> requested_capacity =
+        ComputeCapacity(storage_view.capacity, new_size);
+    Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
 
-  construction_tx.Construct(construct_loop.data(), &values,
-                            construct_loop.size());
+    ConstructionTransaction<A> construction_tx(alloc);
+    construction_tx.Construct(new_data + size, values, new_size - size);
 
-  inlined_vector_internal::ConstructElements(
-      GetAllocPtr(), move_construct_loop.data(), &move_values,
-      move_construct_loop.size());
+    IteratorValueAdapter<A, MoveIterator<A>> move_values(
+        (MoveIterator<A>(base)));
+    ConstructElements<A>(alloc, new_data, move_values, size);
 
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
-                                           destroy_loop.size());
-
-  construction_tx.Commit();
-  if (allocation_tx.DidAllocate()) {
+    DestroyAdapter<A>::DestroyElements(alloc, base, size);
+    std::move(construction_tx).Commit();
     DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
     SetIsAllocated();
   }
-
   SetSize(new_size);
 }
 
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
-                              size_type insert_count) -> iterator {
-  StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
+                              SizeType<A> insert_count) -> Iterator<A> {
+  StorageView<A> storage_view = MakeStorageView();
 
-  size_type insert_index =
-      std::distance(const_iterator(storage_view.data), pos);
-  size_type insert_end_index = insert_index + insert_count;
-  size_type new_size = storage_view.size + insert_count;
+  auto insert_index = static_cast<SizeType<A>>(
+      std::distance(ConstIterator<A>(storage_view.data), pos));
+  SizeType<A> insert_end_index = insert_index + insert_count;
+  SizeType<A> new_size = storage_view.size + insert_count;
 
   if (new_size > storage_view.capacity) {
-    AllocationTransaction allocation_tx(GetAllocPtr());
-    ConstructionTransaction construction_tx(GetAllocPtr());
-    ConstructionTransaction move_construciton_tx(GetAllocPtr());
+    AllocationTransaction<A> allocation_tx(GetAllocator());
+    ConstructionTransaction<A> construction_tx(GetAllocator());
+    ConstructionTransaction<A> move_construction_tx(GetAllocator());
 
-    IteratorValueAdapter<MoveIterator> move_values(
-        MoveIterator(storage_view.data));
+    IteratorValueAdapter<A, MoveIterator<A>> move_values(
+        MoveIterator<A>(storage_view.data));
 
-    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    pointer new_data = allocation_tx.Allocate(new_capacity);
+    SizeType<A> requested_capacity =
+        ComputeCapacity(storage_view.capacity, new_size);
+    Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
 
-    construction_tx.Construct(new_data + insert_index, &values, insert_count);
+    construction_tx.Construct(new_data + insert_index, values, insert_count);
 
-    move_construciton_tx.Construct(new_data, &move_values, insert_index);
+    move_construction_tx.Construct(new_data, move_values, insert_index);
 
-    inlined_vector_internal::ConstructElements(
-        GetAllocPtr(), new_data + insert_end_index, &move_values,
-        storage_view.size - insert_index);
+    ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
+                         move_values, storage_view.size - insert_index);
 
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                             storage_view.size);
+    DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+                                       storage_view.size);
 
-    construction_tx.Commit();
-    move_construciton_tx.Commit();
+    std::move(construction_tx).Commit();
+    std::move(move_construction_tx).Commit();
     DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
 
     SetAllocatedSize(new_size);
-    return iterator(new_data + insert_index);
+    return Iterator<A>(new_data + insert_index);
   } else {
-    size_type move_construction_destination_index =
+    SizeType<A> move_construction_destination_index =
         (std::max)(insert_end_index, storage_view.size);
 
-    ConstructionTransaction move_construction_tx(GetAllocPtr());
+    ConstructionTransaction<A> move_construction_tx(GetAllocator());
 
-    IteratorValueAdapter<MoveIterator> move_construction_values(
-        MoveIterator(storage_view.data +
-                     (move_construction_destination_index - insert_count)));
-    absl::Span<value_type> move_construction = {
+    IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
+        MoveIterator<A>(storage_view.data +
+                        (move_construction_destination_index - insert_count)));
+    absl::Span<ValueType<A>> move_construction = {
         storage_view.data + move_construction_destination_index,
         new_size - move_construction_destination_index};
 
-    pointer move_assignment_values = storage_view.data + insert_index;
-    absl::Span<value_type> move_assignment = {
+    Pointer<A> move_assignment_values = storage_view.data + insert_index;
+    absl::Span<ValueType<A>> move_assignment = {
         storage_view.data + insert_end_index,
         move_construction_destination_index - insert_end_index};
 
-    absl::Span<value_type> insert_assignment = {move_assignment_values,
-                                                move_construction.size()};
+    absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
+                                                  move_construction.size()};
 
-    absl::Span<value_type> insert_construction = {
+    absl::Span<ValueType<A>> insert_construction = {
         insert_assignment.data() + insert_assignment.size(),
         insert_count - insert_assignment.size()};
 
     move_construction_tx.Construct(move_construction.data(),
-                                   &move_construction_values,
+                                   move_construction_values,
                                    move_construction.size());
 
-    for (pointer destination = move_assignment.data() + move_assignment.size(),
-                 last_destination = move_assignment.data(),
-                 source = move_assignment_values + move_assignment.size();
+    for (Pointer<A>
+             destination = move_assignment.data() + move_assignment.size(),
+             last_destination = move_assignment.data(),
+             source = move_assignment_values + move_assignment.size();
          ;) {
       --destination;
       --source;
@@ -666,157 +809,164 @@
       *destination = std::move(*source);
     }
 
-    inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
-                                            insert_assignment.size());
+    AssignElements<A>(insert_assignment.data(), values,
+                      insert_assignment.size());
 
-    inlined_vector_internal::ConstructElements(
-        GetAllocPtr(), insert_construction.data(), &values,
-        insert_construction.size());
+    ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
+                         insert_construction.size());
 
-    move_construction_tx.Commit();
+    std::move(move_construction_tx).Commit();
 
     AddSize(insert_count);
-    return iterator(storage_view.data + insert_index);
+    return Iterator<A>(storage_view.data + insert_index);
   }
 }
 
 template <typename T, size_t N, typename A>
 template <typename... Args>
-auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
-  StorageView storage_view = MakeStorageView();
-
-  AllocationTransaction allocation_tx(GetAllocPtr());
-
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
-
-  pointer construct_data;
-  if (storage_view.size == storage_view.capacity) {
-    size_type new_capacity = NextCapacity(storage_view.capacity);
-    construct_data = allocation_tx.Allocate(new_capacity);
-  } else {
-    construct_data = storage_view.data;
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
+  StorageView<A> storage_view = MakeStorageView();
+  const SizeType<A> n = storage_view.size;
+  if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+    // Fast path; new element fits.
+    Pointer<A> last_ptr = storage_view.data + n;
+    AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+                                  std::forward<Args>(args)...);
+    AddSize(1);
+    return *last_ptr;
   }
+  // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+  return EmplaceBackSlow(std::forward<Args>(args)...);
+}
 
-  pointer last_ptr = construct_data + storage_view.size;
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
+  StorageView<A> storage_view = MakeStorageView();
+  AllocationTransaction<A> allocation_tx(GetAllocator());
+  IteratorValueAdapter<A, MoveIterator<A>> move_values(
+      MoveIterator<A>(storage_view.data));
+  SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+  Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
+  Pointer<A> last_ptr = construct_data + storage_view.size;
 
-  AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
-                             std::forward<Args>(args)...);
-
-  if (allocation_tx.DidAllocate()) {
-    ABSL_INTERNAL_TRY {
-      inlined_vector_internal::ConstructElements(
-          GetAllocPtr(), allocation_tx.GetData(), &move_values,
-          storage_view.size);
-    }
-    ABSL_INTERNAL_CATCH_ANY {
-      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
-      ABSL_INTERNAL_RETHROW;
-    }
-
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                             storage_view.size);
-
-    DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
-    SetIsAllocated();
+  // Construct new element.
+  AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+                                std::forward<Args>(args)...);
+  // Move elements from old backing store to new backing store.
+  ABSL_INTERNAL_TRY {
+    ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
+                         storage_view.size);
   }
+  ABSL_INTERNAL_CATCH_ANY {
+    AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
+    ABSL_INTERNAL_RETHROW;
+  }
+  // Destroy elements in old backing store.
+  DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+                                     storage_view.size);
 
+  DeallocateIfAllocated();
+  SetAllocation(std::move(allocation_tx).Release());
+  SetIsAllocated();
   AddSize(1);
   return *last_ptr;
 }
 
 template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
-    -> iterator {
-  StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
+    -> Iterator<A> {
+  StorageView<A> storage_view = MakeStorageView();
 
-  size_type erase_size = std::distance(from, to);
-  size_type erase_index =
-      std::distance(const_iterator(storage_view.data), from);
-  size_type erase_end_index = erase_index + erase_size;
+  auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
+  auto erase_index = static_cast<SizeType<A>>(
+      std::distance(ConstIterator<A>(storage_view.data), from));
+  SizeType<A> erase_end_index = erase_index + erase_size;
 
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data + erase_end_index));
+  IteratorValueAdapter<A, MoveIterator<A>> move_values(
+      MoveIterator<A>(storage_view.data + erase_end_index));
 
-  inlined_vector_internal::AssignElements(storage_view.data + erase_index,
-                                          &move_values,
-                                          storage_view.size - erase_end_index);
+  AssignElements<A>(storage_view.data + erase_index, move_values,
+                    storage_view.size - erase_end_index);
 
-  inlined_vector_internal::DestroyElements(
-      GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+  DestroyAdapter<A>::DestroyElements(
+      GetAllocator(), storage_view.data + (storage_view.size - erase_size),
       erase_size);
 
   SubtractSize(erase_size);
-  return iterator(storage_view.data + erase_index);
+  return Iterator<A>(storage_view.data + erase_index);
 }
 
 template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
-  StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
+  StorageView<A> storage_view = MakeStorageView();
 
   if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
 
-  AllocationTransaction allocation_tx(GetAllocPtr());
+  AllocationTransaction<A> allocation_tx(GetAllocator());
 
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
+  IteratorValueAdapter<A, MoveIterator<A>> move_values(
+      MoveIterator<A>(storage_view.data));
 
-  size_type new_capacity =
+  SizeType<A> new_requested_capacity =
       ComputeCapacity(storage_view.capacity, requested_capacity);
-  pointer new_data = allocation_tx.Allocate(new_capacity);
+  Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
 
-  inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
-                                             &move_values, storage_view.size);
+  ConstructElements<A>(GetAllocator(), new_data, move_values,
+                       storage_view.size);
 
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                           storage_view.size);
+  DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+                                     storage_view.size);
 
   DeallocateIfAllocated();
-  AcquireAllocatedData(&allocation_tx);
+  SetAllocation(std::move(allocation_tx).Release());
   SetIsAllocated();
 }
 
 template <typename T, size_t N, typename A>
 auto Storage<T, N, A>::ShrinkToFit() -> void {
   // May only be called on allocated instances!
-  assert(GetIsAllocated());
+  ABSL_HARDENING_ASSERT(GetIsAllocated());
 
-  StorageView storage_view{GetAllocatedData(), GetSize(),
-                           GetAllocatedCapacity()};
+  StorageView<A> storage_view{GetAllocatedData(), GetSize(),
+                              GetAllocatedCapacity()};
 
   if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
 
-  AllocationTransaction allocation_tx(GetAllocPtr());
+  AllocationTransaction<A> allocation_tx(GetAllocator());
 
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
+  IteratorValueAdapter<A, MoveIterator<A>> move_values(
+      MoveIterator<A>(storage_view.data));
 
-  pointer construct_data;
+  Pointer<A> construct_data;
   if (storage_view.size > GetInlinedCapacity()) {
-    size_type new_capacity = storage_view.size;
-    construct_data = allocation_tx.Allocate(new_capacity);
+    SizeType<A> requested_capacity = storage_view.size;
+    construct_data = allocation_tx.Allocate(requested_capacity);
+    if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+      // Already using the smallest available heap allocation.
+      return;
+    }
   } else {
     construct_data = GetInlinedData();
   }
 
   ABSL_INTERNAL_TRY {
-    inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
-                                               &move_values, storage_view.size);
+    ConstructElements<A>(GetAllocator(), construct_data, move_values,
+                         storage_view.size);
   }
   ABSL_INTERNAL_CATCH_ANY {
-    SetAllocatedData(storage_view.data, storage_view.capacity);
+    SetAllocation({storage_view.data, storage_view.capacity});
     ABSL_INTERNAL_RETHROW;
   }
 
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                           storage_view.size);
+  DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+                                     storage_view.size);
 
-  AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
-                              storage_view.capacity);
+  MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+                               storage_view.capacity);
 
   if (allocation_tx.DidAllocate()) {
-    AcquireAllocatedData(&allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
   } else {
     UnsetIsAllocated();
   }
@@ -825,68 +975,116 @@
 template <typename T, size_t N, typename A>
 auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
   using std::swap;
-  assert(this != other_storage_ptr);
+  ABSL_HARDENING_ASSERT(this != other_storage_ptr);
 
   if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
     swap(data_.allocated, other_storage_ptr->data_.allocated);
   } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
-    Storage* small_ptr = this;
-    Storage* large_ptr = other_storage_ptr;
-    if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
-
-    for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
-      swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
-    }
-
-    IteratorValueAdapter<MoveIterator> move_values(
-        MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
-
-    inlined_vector_internal::ConstructElements(
-        large_ptr->GetAllocPtr(),
-        small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
-        large_ptr->GetSize() - small_ptr->GetSize());
-
-    inlined_vector_internal::DestroyElements(
-        large_ptr->GetAllocPtr(),
-        large_ptr->GetInlinedData() + small_ptr->GetSize(),
-        large_ptr->GetSize() - small_ptr->GetSize());
+    SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr);
   } else {
     Storage* allocated_ptr = this;
     Storage* inlined_ptr = other_storage_ptr;
     if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
 
-    StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
-                                       allocated_ptr->GetSize(),
-                                       allocated_ptr->GetAllocatedCapacity()};
+    StorageView<A> allocated_storage_view{
+        allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
+        allocated_ptr->GetAllocatedCapacity()};
 
-    IteratorValueAdapter<MoveIterator> move_values(
-        MoveIterator(inlined_ptr->GetInlinedData()));
+    IteratorValueAdapter<A, MoveIterator<A>> move_values(
+        MoveIterator<A>(inlined_ptr->GetInlinedData()));
 
     ABSL_INTERNAL_TRY {
-      inlined_vector_internal::ConstructElements(
-          inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
-          &move_values, inlined_ptr->GetSize());
+      ConstructElements<A>(inlined_ptr->GetAllocator(),
+                           allocated_ptr->GetInlinedData(), move_values,
+                           inlined_ptr->GetSize());
     }
     ABSL_INTERNAL_CATCH_ANY {
-      allocated_ptr->SetAllocatedData(allocated_storage_view.data,
-                                      allocated_storage_view.capacity);
+      allocated_ptr->SetAllocation(Allocation<A>{
+          allocated_storage_view.data, allocated_storage_view.capacity});
       ABSL_INTERNAL_RETHROW;
     }
 
-    inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
-                                             inlined_ptr->GetInlinedData(),
-                                             inlined_ptr->GetSize());
+    DestroyAdapter<A>::DestroyElements(inlined_ptr->GetAllocator(),
+                                       inlined_ptr->GetInlinedData(),
+                                       inlined_ptr->GetSize());
 
-    inlined_ptr->SetAllocatedData(allocated_storage_view.data,
-                                  allocated_storage_view.capacity);
+    inlined_ptr->SetAllocation(Allocation<A>{allocated_storage_view.data,
+                                             allocated_storage_view.capacity});
   }
 
   swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
-  swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+  swap(GetAllocator(), other_storage_ptr->GetAllocator());
 }
 
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapN(ElementwiseSwapPolicy, Storage* other,
+                             SizeType<A> n) {
+  std::swap_ranges(GetInlinedData(), GetInlinedData() + n,
+                   other->GetInlinedData());
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapN(ElementwiseConstructPolicy, Storage* other,
+                             SizeType<A> n) {
+  Pointer<A> a = GetInlinedData();
+  Pointer<A> b = other->GetInlinedData();
+  // See the note on allocators in `SwapInlinedElements`.
+  A& allocator_a = GetAllocator();
+  A& allocator_b = other->GetAllocator();
+  for (SizeType<A> i = 0; i < n; ++i, ++a, ++b) {
+    ValueType<A> tmp(std::move(*a));
+
+    AllocatorTraits<A>::destroy(allocator_a, a);
+    AllocatorTraits<A>::construct(allocator_b, a, std::move(*b));
+
+    AllocatorTraits<A>::destroy(allocator_b, b);
+    AllocatorTraits<A>::construct(allocator_a, b, std::move(tmp));
+  }
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapInlinedElements(MemcpyPolicy, Storage* other) {
+  Data tmp = data_;
+  data_ = other->data_;
+  other->data_ = tmp;
+}
+
+template <typename T, size_t N, typename A>
+template <typename NotMemcpyPolicy>
+void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
+                                           Storage* other) {
+  // Note: `destroy` needs to use the pre-swap allocator, while `construct`
+  // needs the post-swap allocator. The allocators themselves are swapped
+  // later, outside of `SwapInlinedElements`.
+  Storage* small_ptr = this;
+  Storage* large_ptr = other;
+  if (small_ptr->GetSize() > large_ptr->GetSize()) {
+    std::swap(small_ptr, large_ptr);
+  }
+
+  auto small_size = small_ptr->GetSize();
+  auto diff = large_ptr->GetSize() - small_size;
+  SwapN(policy, other, small_size);
+
+  IteratorValueAdapter<A, MoveIterator<A>> move_values(
+      MoveIterator<A>(large_ptr->GetInlinedData() + small_size));
+
+  ConstructElements<A>(large_ptr->GetAllocator(),
+                       small_ptr->GetInlinedData() + small_size, move_values,
+                       diff);
+
+  DestroyAdapter<A>::DestroyElements(large_ptr->GetAllocator(),
+                                     large_ptr->GetInlinedData() + small_size,
+                                     diff);
+}
+
+// End ignore "array-bounds"
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
 }  // namespace inlined_vector_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#endif  // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
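Throughout the rewritten `Storage` code above, the transactional helpers are now
consumed via `std::move(tx).Commit()` / `std::move(tx).Release()` instead of
plain lvalue calls. A minimal, self-contained sketch of that
rvalue-ref-qualified hand-off pattern (using a hypothetical `ScopedAllocation`
type, not Abseil's actual `AllocationTransaction`):

#include <cstdlib>
#include <utility>

// Hypothetical illustration of an rvalue-qualified release API.
class ScopedAllocation {
 public:
  explicit ScopedAllocation(std::size_t n) : data_(std::malloc(n)) {}
  ~ScopedAllocation() { std::free(data_); }  // Rolls back unless released.

  ScopedAllocation(const ScopedAllocation&) = delete;
  ScopedAllocation& operator=(const ScopedAllocation&) = delete;

  // The `&&` qualifier means this is only callable on an expiring object,
  // e.g. `std::move(tx).Release()`.
  void* Release() && { return std::exchange(data_, nullptr); }

 private:
  void* data_;
};

void* Example() {
  ScopedAllocation tx(64);
  // ... work that may throw; `tx` frees the block if we unwind here ...
  return std::move(tx).Release();  // Success: take ownership, skip the free.
}

Qualifying `Release()` with `&&` forces callers to write `std::move(...)`,
making the point of ownership transfer visible at each call site.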
diff --git a/abseil-cpp/absl/container/internal/layout.h b/abseil-cpp/absl/container/internal/layout.h
index 2336783..a59a243 100644
--- a/abseil-cpp/absl/container/internal/layout.h
+++ b/abseil-cpp/absl/container/internal/layout.h
@@ -404,7 +404,7 @@
   constexpr size_t Offset() const {
     static_assert(N < NumOffsets, "Index out of bounds");
     return adl_barrier::Align(
-        Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+        Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
         ElementAlignment<N>::value);
   }
 
@@ -597,7 +597,7 @@
   constexpr size_t AllocSize() const {
     static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
     return Offset<NumTypes - 1>() +
-           SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+        SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
   }
 
   // If built with --config=asan, poisons padding bytes (if any) in the
@@ -621,7 +621,7 @@
     // The `if` is an optimization. It doesn't affect the observable behaviour.
     if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
       size_t start =
-          Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
+          Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
       ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
     }
 #endif
@@ -645,7 +645,7 @@
   // produce "unsigned*" where another produces "unsigned int *".
   std::string DebugString() const {
     const auto offsets = Offsets();
-    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
     const std::string types[] = {
         adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
     std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
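The `layout.h` hunks above switch `SizeOf<T>` from a call expression to a
`::value` member, i.e. from a constexpr-function shape to a trait shape. A
hedged sketch of the trait form these call sites assume (the real Abseil
definition may differ, e.g. to special-case aligned wrapper types):

#include <cstddef>
#include <type_traits>

// Trait-style SizeOf: the size is a class-level constant, SizeOf<T>::value,
// rather than the result of calling SizeOf<T>(), so it can be partially
// specialized without touching the call sites shown in the diff.
template <class T>
struct SizeOf : std::integral_constant<std::size_t, sizeof(T)> {};

static_assert(SizeOf<int>::value == sizeof(int), "size matches sizeof");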
diff --git a/abseil-cpp/absl/container/internal/layout_benchmark.cc b/abseil-cpp/absl/container/internal/layout_benchmark.cc
new file mode 100644
index 0000000..3af35e3
--- /dev/null
+++ b/abseil-cpp/absl/container/internal/layout_benchmark.cc
@@ -0,0 +1,122 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Every benchmark should have the same performance as the corresponding
+// headroom benchmark.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/layout.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::benchmark::DoNotOptimize;
+
+using Int128 = int64_t[2];
+
+// This benchmark provides the upper bound on performance for BM_OffsetConstant.
+template <size_t Offset, class... Ts>
+void BM_OffsetConstantHeadroom(benchmark::State& state) {
+  for (auto _ : state) {
+    DoNotOptimize(Offset);
+  }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetConstant(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
+                 "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>());
+  }
+}
+
+template <class... Ts>
+size_t VariableOffset(size_t n, size_t m, size_t k);
+
+template <>
+size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
+                                                        size_t k) {
+  auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); };
+  return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
+}
+
+template <>
+size_t VariableOffset<Int128, int32_t, int16_t, int8_t>(size_t n, size_t m,
+                                                        size_t k) {
+  // No alignment is necessary.
+  return n * 16 + m * 4 + k * 2;
+}
+
+// This benchmark provides the upper bound on performance for BM_OffsetVariable.
+template <size_t Offset, class... Ts>
+void BM_OffsetVariableHeadroom(benchmark::State& state) {
+  size_t n = 3;
+  size_t m = 5;
+  size_t k = 7;
+  ABSL_RAW_CHECK(VariableOffset<Ts...>(n, m, k) == Offset, "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(n);
+    DoNotOptimize(m);
+    DoNotOptimize(k);
+    DoNotOptimize(VariableOffset<Ts...>(n, m, k));
+  }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetVariable(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  size_t n = 3;
+  size_t m = 5;
+  size_t k = 7;
+  ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
+                 "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(n);
+    DoNotOptimize(m);
+    DoNotOptimize(k);
+    DoNotOptimize(L::Partial(n, m, k).template Offset<3>());
+  }
+}
+
+// Run all benchmarks in two modes:
+//
+//   Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
+//   Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?].
+
+#define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \
+  auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 =  \
+      NAME<OFFSET, T1, T2, T3, T4>;                    \
+  BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4)
+
+OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
+                 Int128);
+OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
+                 int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
+                 Int128);
+OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
+                 int8_t);
+OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
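The hard-coded expected offsets (48 and 82) in the benchmarks above can be
checked by hand with the same align-up formula used by the `VariableOffset`
specializations; a small standalone check (illustrative only, using the sizes
3, 5, 7 from the benchmarks):

#include <cstddef>

// Round n up to the next multiple of the power-of-two m.
constexpr std::size_t Align(std::size_t n, std::size_t m) {
  return (n + m - 1) & ~(m - 1);
}

// int8_t[3], int16_t[5], int32_t[7], then Int128 (16 bytes, 8-byte aligned):
// Align(3, 2) = 4; 4 + 5*2 = 14; Align(14, 4) = 16; 16 + 7*4 = 44;
// Align(44, 8) = 48.
static_assert(Align(Align(Align(3 * 1, 2) + 5 * 2, 4) + 7 * 4, 8) == 48,
              "padded layout offset");

// Int128[3], int32_t[5], int16_t[7], then int8_t: every step is already
// aligned, so the offset is just 3*16 + 5*4 + 7*2 = 82.
static_assert(3 * 16 + 5 * 4 + 7 * 2 == 82, "padding-free layout offset");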
diff --git a/abseil-cpp/absl/container/internal/layout_test.cc b/abseil-cpp/absl/container/internal/layout_test.cc
index 757272f..ce599ce 100644
--- a/abseil-cpp/absl/container/internal/layout_test.cc
+++ b/abseil-cpp/absl/container/internal/layout_test.cc
@@ -26,7 +26,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/check.h"
 #include "absl/types/span.h"
 
 namespace absl {
@@ -38,7 +38,7 @@
 using ::testing::ElementsAre;
 
 size_t Distance(const void* from, const void* to) {
-  ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
+  CHECK_LE(from, to) << "Distance must be non-negative";
   return static_cast<const char*>(to) - static_cast<const char*>(from);
 }
 
@@ -128,8 +128,10 @@
   {
     using L = Layout<int32_t, int32_t>;
     SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
-    SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
-    SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial(0))::ElementTypes>();
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
@@ -364,22 +366,25 @@
 }
 
 TEST(Layout, PointerByIndex) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
   }
   {
     using L = Layout<int32_t, int32_t>;
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
-    EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
     EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
     EXPECT_EQ(12,
-              Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(
+        12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
     EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
   }
@@ -387,39 +392,44 @@
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
-    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
-    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
     EXPECT_EQ(8,
               Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
     EXPECT_EQ(
-        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(
         8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(
@@ -428,7 +438,8 @@
         24,
         Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(
-        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -436,83 +447,86 @@
 }
 
 TEST(Layout, PointerByType) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+        0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+        0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         0,
         Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         8,
         Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3).Pointer<int32_t>(p))));
     EXPECT_EQ(
         24,
         Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<const Int128*>(
                                  L::Partial(0, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        4,
-        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(8, Distance(p, Type<const Int128*>(
                                  L::Partial(1, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(24, Distance(p, Type<const Int128*>(
                                   L::Partial(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int32_t>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
   }
 }
 
 TEST(Layout, MutablePointerByIndex) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
@@ -548,15 +562,18 @@
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -564,52 +581,65 @@
 }
 
 TEST(Layout, MutablePointerByType) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(8,
               Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(
         24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
@@ -617,7 +647,7 @@
 }
 
 TEST(Layout, Pointers) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   using L = Layout<int8_t, int8_t, Int128>;
   {
     const auto x = L::Partial();
@@ -653,7 +683,7 @@
 }
 
 TEST(Layout, MutablePointers) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   using L = Layout<int8_t, int8_t, Int128>;
   {
     const auto x = L::Partial();
@@ -686,7 +716,7 @@
 }
 
 TEST(Layout, SliceByIndexSize) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
@@ -714,7 +744,7 @@
 }
 
 TEST(Layout, SliceByTypeSize) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
@@ -736,7 +766,7 @@
 }
 
 TEST(Layout, MutableSliceByIndexSize) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
@@ -764,7 +794,7 @@
 }
 
 TEST(Layout, MutableSliceByTypeSize) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
@@ -786,71 +816,76 @@
 }
 
 TEST(Layout, SliceByIndexData) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
   }
   {
     using L = Layout<int32_t, int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
     EXPECT_EQ(
         12,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
-    EXPECT_EQ(12,
-              Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
         0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+               p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
     EXPECT_EQ(
         0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        8,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+               p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
@@ -864,7 +899,8 @@
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         4,
         Distance(
@@ -878,7 +914,8 @@
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(
@@ -890,133 +927,132 @@
             p,
             Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+        0,
+        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+        8,
+        Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
   }
 }
 
 TEST(Layout, SliceByTypeData) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
             p,
-            Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(
-            p,
-            Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        8,
-        Distance(
-            p,
-            Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+            Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
             p,
-            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+            Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
+        Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
                         .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(0, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0, 0).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
                                  L::Partial(0, 0, 0).Slice<Int128>(p))
                                  .data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
-                        .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(1, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0, 0).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
                                  L::Partial(1, 0, 0).Slice<Int128>(p))
                                  .data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(5, 3, 1).Slice<int8_t>(p))
+                                 .data()));
     EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
                                   L::Partial(5, 3, 1).Slice<Int128>(p))
                                   .data()));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
-                        .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3, 1).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p,
                  Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        8, Distance(
-               p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+        8,
+        Distance(
+            p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
   }
 }
 
 TEST(Layout, MutableSliceByIndexData) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
   }
   {
     using L = Layout<int32_t, int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
     EXPECT_EQ(
         12,
         Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
@@ -1025,123 +1061,149 @@
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         0, Distance(
                p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        4,
-        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+        4, Distance(
+               p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         8, Distance(
                p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24, Distance(
                 p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
     EXPECT_EQ(
-        8,
-        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+        8, Distance(
+               p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(24,
               Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
-    EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(8,
+              Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
   }
 }
 
 TEST(Layout, MutableSliceByTypeData) {
-  alignas(max_align_t) unsigned char p[100];
+  alignas(max_align_t) unsigned char p[100] = {0};
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+        Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4, Distance(
-               p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+        Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        8, Distance(
-               p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+            p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
             p,
             Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         4,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         8,
         Distance(
             p,
             Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(
@@ -1150,14 +1212,16 @@
     EXPECT_EQ(
         8,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+        8,
+        Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
   }
 }
 
@@ -1204,7 +1268,7 @@
 }
 
 TEST(Layout, Slices) {
-  alignas(max_align_t) const unsigned char p[100] = {};
+  alignas(max_align_t) const unsigned char p[100] = {0};
   using L = Layout<int8_t, int8_t, Int128>;
   {
     const auto x = L::Partial();
@@ -1238,7 +1302,7 @@
 }
 
 TEST(Layout, MutableSlices) {
-  alignas(max_align_t) unsigned char p[100] = {};
+  alignas(max_align_t) unsigned char p[100] = {0};
   using L = Layout<int8_t, int8_t, Int128>;
   {
     const auto x = L::Partial();
@@ -1256,17 +1320,17 @@
   }
   {
     const auto x = L::Partial(1, 2, 3);
-    EXPECT_THAT(
-        (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
-        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
-              IsSameSlice(x.Slice<2>(p))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
   }
   {
     const L x(1, 2, 3);
-    EXPECT_THAT(
-        (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
-        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
-              IsSameSlice(x.Slice<2>(p))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
   }
 }
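// Editor's sketch (not part of the change): the reflowed expectations above
// all follow from how the internal Layout utility packs one array per type,
// rounding each array's start up to its element alignment.  This mirrors the
// (5, 3, 1) case used throughout; int64_t stands in for the tests' 16-byte
// Int128 helper, which leaves the 0/8/24 offsets unchanged.
#include <cstdint>

#include "absl/container/internal/layout.h"

inline void LayoutSketch() {
  using absl::container_internal::Layout;
  constexpr Layout<int8_t, int32_t, int64_t> layout(5, 3, 1);  // sizes per type
  alignas(int64_t) unsigned char storage[layout.AllocSize()] = {0};  // 32 bytes
  int8_t* a = layout.Pointer<0>(storage);   // storage + 0:  5 x int8_t
  int32_t* b = layout.Pointer<1>(storage);  // storage + 8:  3 x int32_t
  int64_t* c = layout.Pointer<2>(storage);  // storage + 24: 1 x int64_t
  (void)a;
  (void)b;
  (void)c;
}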
 
@@ -1286,7 +1350,13 @@
 TEST(Layout, OverAligned) {
   constexpr size_t M = alignof(max_align_t);
   constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+#ifdef __GNUC__
+  // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug:
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357
+  __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()];
+#else
   alignas(2 * M) unsigned char p[x.AllocSize()];
+#endif
   EXPECT_EQ(2 * M + 3, x.AllocSize());
   EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
 }
@@ -1398,7 +1468,8 @@
               x.DebugString());
   }
   {
-    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
     EXPECT_EQ(
         "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
         "@16" +
@@ -1406,7 +1477,8 @@
         x.DebugString());
   }
   {
-    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
     EXPECT_EQ(
         "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
         "@16" +
diff --git a/abseil-cpp/absl/container/internal/node_hash_policy.h b/abseil-cpp/absl/container/internal/node_slot_policy.h
similarity index 93%
rename from abseil-cpp/absl/container/internal/node_hash_policy.h
rename to abseil-cpp/absl/container/internal/node_slot_policy.h
index 4617162..baba574 100644
--- a/abseil-cpp/absl/container/internal/node_hash_policy.h
+++ b/abseil-cpp/absl/container/internal/node_slot_policy.h
@@ -30,8 +30,8 @@
 // It may also optionally define `value()` and `apply()`. For documentation on
 // these, see hash_policy_traits.h.
 
-#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
-#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
 
 #include <cassert>
 #include <cstddef>
@@ -46,7 +46,7 @@
 namespace container_internal {
 
 template <class Reference, class Policy>
-struct node_hash_policy {
+struct node_slot_policy {
   static_assert(std::is_lvalue_reference<Reference>::value, "");
 
   using slot_type = typename std::remove_cv<
@@ -89,4 +89,4 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#endif  // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
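For orientation: node_slot_policy is the CRTP base that node-based containers
use to turn a value policy into one that stores heap-allocated nodes. The
derived policy supplies the documented new_element()/delete_element() hooks,
and the base derives construct/destroy/transfer from them. A hedged sketch of
a minimal policy, modeled on the Policy struct in the renamed test below (a
real policy would allocate through `alloc` rather than plain new/delete):

#include <utility>

#include "absl/container/internal/node_slot_policy.h"

struct IntNodePolicy
    : absl::container_internal::node_slot_policy<int&, IntNodePolicy> {
  using key_type = int;
  using init_type = int;

  // Allocates and constructs a node; the base class's construct() forwards
  // here.  Illustrative only: uses new instead of the allocator.
  template <class Alloc, class... Args>
  static int* new_element(Alloc* /*alloc*/, Args&&... args) {
    return new int(std::forward<Args>(args)...);
  }

  // Destroys and deallocates a node; the base class's destroy() forwards here.
  template <class Alloc>
  static void delete_element(Alloc* /*alloc*/, int* node) {
    delete node;
  }
};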
diff --git a/abseil-cpp/absl/container/internal/node_hash_policy_test.cc b/abseil-cpp/absl/container/internal/node_slot_policy_test.cc
similarity index 93%
rename from abseil-cpp/absl/container/internal/node_hash_policy_test.cc
rename to abseil-cpp/absl/container/internal/node_slot_policy_test.cc
index 84aabba..51b7467 100644
--- a/abseil-cpp/absl/container/internal/node_hash_policy_test.cc
+++ b/abseil-cpp/absl/container/internal/node_slot_policy_test.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
 
 #include <memory>
 
@@ -27,7 +27,7 @@
 
 using ::testing::Pointee;
 
-struct Policy : node_hash_policy<int&, Policy> {
+struct Policy : node_slot_policy<int&, Policy> {
   using key_type = int;
   using init_type = int;
 
diff --git a/abseil-cpp/absl/container/internal/raw_hash_map.h b/abseil-cpp/absl/container/internal/raw_hash_map.h
index 0a02757..2d5a871 100644
--- a/abseil-cpp/absl/container/internal/raw_hash_map.h
+++ b/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -51,8 +51,9 @@
   using key_arg = typename KeyArgImpl::template type<K, key_type>;
 
   static_assert(!std::is_reference<key_type>::value, "");
-  // TODO(alkis): remove this assertion and verify that reference mapped_type is
-  // supported.
+
+  // TODO(b/187807849): Evaluate whether to support reference mapped_type and
+  // remove this assertion if/when it is supported.
   static_assert(!std::is_reference<mapped_type>::value, "");
 
   using iterator = typename raw_hash_map::raw_hash_set::iterator;
@@ -70,43 +71,51 @@
   //   m.insert_or_assign(n, n);
   template <class K = key_type, class V = mapped_type, K* = nullptr,
             V* = nullptr>
-  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
   }
 
   template <class K = key_type, class V = mapped_type, K* = nullptr>
-  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(std::forward<K>(k), v);
   }
 
   template <class K = key_type, class V = mapped_type, V* = nullptr>
-  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(k, std::forward<V>(v));
   }
 
   template <class K = key_type, class V = mapped_type>
-  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign_impl(k, v);
   }
 
   template <class K = key_type, class V = mapped_type, K* = nullptr,
             V* = nullptr>
-  iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+  iterator insert_or_assign(const_iterator, key_arg<K>&& k,
+                            V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
   }
 
   template <class K = key_type, class V = mapped_type, K* = nullptr>
-  iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+  iterator insert_or_assign(const_iterator, key_arg<K>&& k,
+                            const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign(std::forward<K>(k), v).first;
   }
 
   template <class K = key_type, class V = mapped_type, V* = nullptr>
-  iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+  iterator insert_or_assign(const_iterator, const key_arg<K>& k,
+                            V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign(k, std::forward<V>(v)).first;
   }
 
   template <class K = key_type, class V = mapped_type>
-  iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+  iterator insert_or_assign(const_iterator, const key_arg<K>& k,
+                            const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert_or_assign(k, v).first;
   }
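// Editor's sketch (illustrative): ABSL_ATTRIBUTE_LIFETIME_BOUND expands to
// [[clang::lifetimebound]] under Clang, so binding a reference or iterator
// obtained from a *temporary* map can now be diagnosed at the call site (the
// exact warning group depends on the compiler version).
#include <string>

#include "absl/container/flat_hash_map.h"

inline void LifetimeBoundSketch() {
  absl::flat_hash_map<int, std::string> m;
  std::string& ok = m[1];  // fine: `m` outlives the reference
  ok = "hello";

  // With the annotation, Clang can flag the next line if it is uncommented:
  // the temporary map dies at the end of the full expression, so the
  // reference dangles immediately.
  // std::string& bad = absl::flat_hash_map<int, std::string>{{1, "x"}}[1];
}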
 
@@ -117,29 +126,33 @@
             typename std::enable_if<
                 !std::is_convertible<K, const_iterator>::value, int>::type = 0,
             K* = nullptr>
-  std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+  std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
   }
 
   template <class K = key_type, class... Args,
             typename std::enable_if<
                 !std::is_convertible<K, const_iterator>::value, int>::type = 0>
-  std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+  std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace_impl(k, std::forward<Args>(args)...);
   }
 
   template <class K = key_type, class... Args, K* = nullptr>
-  iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+  iterator try_emplace(const_iterator, key_arg<K>&& k,
+                       Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
   }
 
   template <class K = key_type, class... Args>
-  iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+  iterator try_emplace(const_iterator, const key_arg<K>& k,
+                       Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return try_emplace(k, std::forward<Args>(args)...).first;
   }
 
   template <class K = key_type, class P = Policy>
-  MappedReference<P> at(const key_arg<K>& key) {
+  MappedReference<P> at(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = this->find(key);
     if (it == this->end()) {
       base_internal::ThrowStdOutOfRange(
@@ -149,7 +162,8 @@
   }
 
   template <class K = key_type, class P = Policy>
-  MappedConstReference<P> at(const key_arg<K>& key) const {
+  MappedConstReference<P> at(const key_arg<K>& key) const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = this->find(key);
     if (it == this->end()) {
       base_internal::ThrowStdOutOfRange(
@@ -159,18 +173,21 @@
   }
 
   template <class K = key_type, class P = Policy, K* = nullptr>
-  MappedReference<P> operator[](key_arg<K>&& key) {
+  MappedReference<P> operator[](key_arg<K>&& key)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return Policy::value(&*try_emplace(std::forward<K>(key)).first);
   }
 
   template <class K = key_type, class P = Policy>
-  MappedReference<P> operator[](const key_arg<K>& key) {
+  MappedReference<P> operator[](const key_arg<K>& key)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return Policy::value(&*try_emplace(key).first);
   }
 
  private:
   template <class K, class V>
-  std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+  std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto res = this->find_or_prepare_insert(k);
     if (res.second)
       this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
@@ -180,7 +197,8 @@
   }
 
   template <class K = key_type, class... Args>
-  std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+  std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto res = this->find_or_prepare_insert(k);
     if (res.second)
       this->emplace_at(res.first, std::piecewise_construct,
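The members annotated above are what absl::flat_hash_map and
absl::node_hash_map expose as try_emplace(), at(), operator[] and
insert_or_assign(); a short usage sketch (standard semantics, shown only for
context):

#include <cstdio>
#include <string>

#include "absl/container/flat_hash_map.h"

void MapApiSketch() {
  absl::flat_hash_map<std::string, std::string> m;

  // try_emplace constructs the mapped value only if the key is absent.
  m.try_emplace("k", "first");
  auto result = m.try_emplace("k", "second");  // no insertion, no overwrite
  std::printf("inserted=%d value=%s\n", result.second ? 1 : 0,
              result.first->second.c_str());  // inserted=0 value=first

  // insert_or_assign overwrites the mapped value when the key already exists.
  m.insert_or_assign("k", "third");

  // at() returns a reference to an existing element and throws
  // std::out_of_range (or terminates when exceptions are disabled) for a
  // missing key.
  m.at("k") = "fourth";
}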
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set.cc b/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 919ac07..2ff95b6 100644
--- a/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -15,20 +15,50 @@
 #include "absl/container/internal/raw_hash_set.h"
 
 #include <atomic>
+#include <cassert>
 #include <cstddef>
+#include <cstring>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/hash/hash.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
+// The backing array stores `growth_left` immediately before a single block of
+// control bytes. kEmptyGroup is that single block, shared by every table with
+// no slots allocated; pointing empty tables at it removes a branch from the
+// hot path of find(). To keep the control bytes aligned to 16, we reserve 16
+// bytes in front of them even though growth_left only needs 8.
+constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
+    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
+    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
+    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
+    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
+    ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr size_t Group::kWidth;
+#endif
+
+namespace {
 
 // Returns "random" seed.
 inline size_t RandomSeed() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local size_t counter = 0;
+  // On Linux kernels >= 5.4, the MSAN runtime reports a false positive when
+  // thread local storage data is accessed from loaded libraries
+  // (https://github.com/google/sanitizers/issues/1265), so `counter` needs to
+  // be annotated as initialized.
+  ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t));
   size_t value = ++counter;
 #else   // ABSL_HAVE_THREAD_LOCAL
   static std::atomic<size_t> counter(0);
@@ -37,12 +67,197 @@
   return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
 }
 
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+}  // namespace
+
+GenerationType* EmptyGeneration() {
+  if (SwisstableGenerationsEnabled()) {
+    constexpr size_t kNumEmptyGenerations = 1024;
+    static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
+    return const_cast<GenerationType*>(
+        &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
+  }
+  return nullptr;
+}
+
+bool CommonFieldsGenerationInfoEnabled::
+    should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+                                              size_t capacity) const {
+  if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
+  if (reserved_growth_ > 0) return false;
+  // Note: we can't use the abseil-random library because abseil-random
+  // depends on swisstable. We want to return true with probability
+  // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
+  // we probe based on a random hash and see if the offset is less than
+  // RehashProbabilityConstant().
+  return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
+         RehashProbabilityConstant();
+}
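// Editor's note (illustrative arithmetic; it assumes RehashProbabilityConstant()
// is 16, which should be checked against its definition in raw_hash_set.h): for
// a table of capacity 1023, the probe offset above is roughly uniform over
// [0, capacity], so this check fires with probability about 16/1024, i.e. ~1.6%,
// matching min(1, RehashProbabilityConstant() / capacity()); for capacities
// below 16 it always fires.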
+
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
   // To avoid problems with weak hashes and single bit tests, we use % 13.
   // TODO(kfm,sbenza): revisit after we do unconditional mixing
   return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
 }
 
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == ctrl_t::kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
+  ctrl[capacity] = ctrl_t::kSentinel;
+}
+// Extern template instantiation for inline function.
+template FindInfo find_first_non_full(const CommonFields&, size_t);
+
+FindInfo find_first_non_full_outofline(const CommonFields& common,
+                                       size_t hash) {
+  return find_first_non_full(common, hash);
+}
+
+// Returns the address of the ith slot in `slot_array`, where each slot
+// occupies `slot_size` bytes.
+static inline void* SlotAddress(void* slot_array, size_t slot,
+                                size_t slot_size) {
+  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
+                                 (slot * slot_size));
+}
+
+// Returns the address of the slot just after slot assuming each slot has the
+// specified size.
+static inline void* NextSlot(void* slot, size_t slot_size) {
+  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
+}
+
+// Returns the address of the slot just before slot assuming each slot has the
+// specified size.
+static inline void* PrevSlot(void* slot, size_t slot_size) {
+  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
+}
+
+void DropDeletesWithoutResize(CommonFields& common,
+                              const PolicyFunctions& policy, void* tmp_space) {
+  void* set = &common;
+  void* slot_array = common.slot_array();
+  const size_t capacity = common.capacity();
+  assert(IsValidCapacity(capacity));
+  assert(!is_small(capacity));
+  // Algorithm:
+  // - mark all DELETED slots as EMPTY
+  // - mark all FULL slots as DELETED
+  // - for each slot marked as DELETED
+  //     hash = Hash(element)
+  //     target = find_first_non_full(hash)
+  //     if target is in the same group
+  //       mark slot as FULL
+  //     else if target is EMPTY
+  //       transfer element to target
+  //       mark slot as EMPTY
+  //       mark target as FULL
+  //     else if target is DELETED
+  //       swap current element with target element
+  //       mark target as FULL
+  //       repeat procedure for current slot with moved from element (target)
+  ctrl_t* ctrl = common.control();
+  ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
+  auto hasher = policy.hash_slot;
+  auto transfer = policy.transfer;
+  const size_t slot_size = policy.slot_size;
+
+  size_t total_probe_length = 0;
+  void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
+  for (size_t i = 0; i != capacity;
+       ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
+    assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
+    if (!IsDeleted(ctrl[i])) continue;
+    const size_t hash = (*hasher)(set, slot_ptr);
+    const FindInfo target = find_first_non_full(common, hash);
+    const size_t new_i = target.offset;
+    total_probe_length += target.probe_length;
+
+    // Check whether the old and new i fall within the same group with respect
+    // to the hash. If they do, we don't need to move the object: it already
+    // sits in the best probe position we can give it.
+    const size_t probe_offset = probe(common, hash).offset();
+    const auto probe_index = [probe_offset, capacity](size_t pos) {
+      return ((pos - probe_offset) & capacity) / Group::kWidth;
+    };
+
+    // Element doesn't move.
+    if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+      SetCtrl(common, i, H2(hash), slot_size);
+      continue;
+    }
+
+    void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
+    if (IsEmpty(ctrl[new_i])) {
+      // Transfer element to the empty spot.
+      // SetCtrl poisons/unpoisons the slots so we have to call it at the
+      // right time.
+      SetCtrl(common, new_i, H2(hash), slot_size);
+      (*transfer)(set, new_slot_ptr, slot_ptr);
+      SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
+    } else {
+      assert(IsDeleted(ctrl[new_i]));
+      SetCtrl(common, new_i, H2(hash), slot_size);
+      // Until we are done rehashing, DELETED marks previously FULL slots.
+
+      // Swap i and new_i elements.
+      (*transfer)(set, tmp_space, new_slot_ptr);
+      (*transfer)(set, new_slot_ptr, slot_ptr);
+      (*transfer)(set, slot_ptr, tmp_space);
+
+      // repeat the processing of the ith slot
+      --i;
+      slot_ptr = PrevSlot(slot_ptr, slot_size);
+    }
+  }
+  ResetGrowthLeft(common);
+  common.infoz().RecordRehash(total_probe_length);
+}
+
+void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
+  assert(IsFull(*it) && "erasing a dangling iterator");
+  c.set_size(c.size() - 1);
+  const auto index = static_cast<size_t>(it - c.control());
+  const size_t index_before = (index - Group::kWidth) & c.capacity();
+  const auto empty_after = Group(it).MaskEmpty();
+  const auto empty_before = Group(c.control() + index_before).MaskEmpty();
+
+  // We count how many consecutive non-empty slots there are to the right and
+  // to the left of `it`. If the sum is >= kWidth then there is at least one
+  // probe window that might have seen a full group.
+  bool was_never_full = empty_before && empty_after &&
+                        static_cast<size_t>(empty_after.TrailingZeros()) +
+                                empty_before.LeadingZeros() <
+                            Group::kWidth;
+
+  SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
+          slot_size);
+  c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0));
+  c.infoz().RecordErase();
+}
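// Editor's note (illustrative, with kWidth == 16): if the first empty slot at
// or after `it` is 6 positions away (TrailingZeros() == 6) and the 8 slots
// just before `it` are non-empty with an empty one before them
// (LeadingZeros() == 8), then at most 6 + 8 = 14 < 16 consecutive full slots
// surround `it`, so no 16-slot probe window can ever have observed this
// position inside a fully occupied group, and the slot can safely become
// kEmpty.  Once the sum reaches kWidth, some probe may have skipped past a
// full-looking window containing `it`, so a kDeleted tombstone must be left
// instead.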
+
+void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
+                       bool reuse) {
+  c.set_size(0);
+  if (reuse) {
+    ResetCtrl(c, policy.slot_size);
+    c.infoz().RecordStorageChanged(0, c.capacity());
+  } else {
+    (*policy.dealloc)(c, policy);
+    c.set_control(EmptyGroup());
+    c.set_generation_ptr(EmptyGeneration());
+    c.set_slots(nullptr);
+    c.set_capacity(0);
+    c.infoz().RecordClearedReservation();
+    assert(c.size() == 0);
+    c.infoz().RecordStorageChanged(0, 0);
+  }
+}
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set.h b/abseil-cpp/absl/container/internal/raw_hash_set.h
index ec13a2f..5f89d8e 100644
--- a/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -53,75 +53,208 @@
 //
 // IMPLEMENTATION DETAILS
 //
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
+// # Table Layout
+//
+// A raw_hash_set's backing array consists of control bytes followed by slots
+// that may or may not contain objects.
+//
+// The layout of the backing array, for `capacity` slots, is thus, as a
+// pseudo-struct:
+//
+//   struct BackingArray {
+//     // The number of elements we can insert before growing the capacity.
+//     size_t growth_left;
+//     // Control bytes for the "real" slots.
+//     ctrl_t ctrl[capacity];
+//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
+//     // stop and serves no other purpose.
+//     ctrl_t sentinel;
+//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
+//     // that if a probe sequence picks a value near the end of `ctrl`,
+//     // `Group` will have valid control bytes to look at.
+//     ctrl_t clones[kWidth - 1];
+//     // The actual slot data.
+//     slot_type slots[capacity];
+//   };
+//
+// The length of this array is computed by `AllocSize()` below.
+//
+// Control bytes (`ctrl_t`) are bytes (collected into groups of a
+// platform-specific size) that define the state of the corresponding slot in
+// the slot array. Group manipulation is tightly optimized to be as efficient
+// as possible: SSE and friends on x86, clever bit operations on other arches.
 //
 //      Group 1         Group 2        Group 3
 // +---------------+---------------+---------------+
 // | | | | | | | | | | | | | | | | | | | | | | | | |
 // +---------------+---------------+---------------+
 //
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+// Each control byte is either a special value marking an empty slot, a deleted
+// slot (sometimes called a *tombstone*), or the end-of-table sentinel used by
+// iterators, or, if the slot is occupied, seven bits (H2) from the hash of the
+// value in the corresponding slot.
 //
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
+// Storing control bytes in a separate array also has beneficial cache effects,
+// since more logical slots will fit into a cache line.
 //
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
+// # Hashing
 //
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N the number of groups,
-// the probing function for the i'th probe is:
+// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
+// `H1(hash(x))` is an index into `slots`, and essentially the starting point
+// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
+// objects that cannot possibly be the one we are looking for.
 //
-//   P(0) = H1 % N
+// # Table Operations
 //
-//   P(i) = (P(i - 1) + i) % N
+// The key operations are `insert`, `find`, and `erase`.
 //
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// Since `insert` and `erase` are implemented in terms of `find`, we describe
+// `find` first. To `find` a value `x`, we compute `hash(x)`. From
+// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
+// group of slots in some interesting order.
+//
+// We now walk through these indices. At each index, we select the entire group
+// starting with that index and extract potential candidates: occupied slots
+// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
+// group, we stop: the element is not in the table, and `find` returns `end()`.
+// Each candidate slot `y` is compared with `x`; if `x == y`, we are done and
+// return `&y`; otherwise we continue to the
+// next probe index. Tombstones effectively behave like full slots that never
+// match the value we're looking for.
+//
+// The `H2` bits ensure that when we compare a slot to an object with `==`, we
+// are likely to have actually found the object.  That is, the chance is low
+// that `==` is called and returns `false`.  Thus, when we search for an
+// object, we are unlikely to call `==` many times.  This likelihood can be
+// analyzed as follows (assuming that H2 is a random enough hash function).
+//
+// Let's assume that there are `k` "wrong" objects that must be examined in a
+// probe sequence.  For example, when doing a `find` on an object that is in the
+// table, `k` is the number of objects between the start of the probe sequence
+// and the final found object (not including the final found object).  The
+// expected number of objects with an H2 match is then `k/128`.  Measurements
+// and analysis indicate that even at high load factors, `k` is less than 32,
+// meaning that the number of "false positive" comparisons we must perform is
+// less than 1/8 per `find`.
+
+// `insert` is implemented in terms of `unchecked_insert`, which inserts a
+// value presumed to not be in the table (violating this requirement will cause
+// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
+// it, we construct a `probe_seq` once again, and use it to find the first
+// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
+// first such slot in the group and mark it as full with `x`'s H2.
+//
+// To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
+// perform a `find` to see if it's already present; if it is, we're done. If
+// it's not, we may decide the table is getting overcrowded (i.e. the load
+// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
+// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
+// each element of the table into the new array (we know that no insertion here
+// will insert an already-present value), and discard the old backing array. At
+// this point, we may `unchecked_insert` the value `x`.
+//
+// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
+// presents a viable, initialized slot pointee to the caller.
+//
+// `erase` is implemented in terms of `erase_at`, which takes an index to a
+// slot. Given an offset, we simply create a tombstone and destroy its contents.
+// If we can prove that the slot would not appear in a probe sequence, we can
+// mark the slot as empty instead. We can prove this by observing that if a
+// group has any empty slots, it has never been full (assuming we never create
+// an empty slot in a group with no empties, which this heuristic guarantees we
+// never do), and `find` would stop at this group anyway (since it does not
+// probe beyond groups with empties).
+//
+// `erase` is `erase_at` composed with `find`: if we
+// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
+// slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
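//
// For illustration, the H1/H2 split described under "# Hashing" can be written
// roughly as follows (the real helpers defined later in this header
// additionally mix a per-table seed, derived from the control pointer, into
// H1):
//
//   size_t H1(size_t hash) { return hash >> 7; }     // probe-sequence start
//   uint8_t H2(size_t hash) { return hash & 0x7F; }  // 7-bit control tag
//
// During a `find`, H1 seeds the probe sequence over groups, and all control
// bytes of a group are compared against H2 in parallel; only slots whose
// control byte equals H2 are candidates for the more expensive `==` check.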
 
 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
 
 #include <algorithm>
 #include <cmath>
+#include <cstddef>
 #include <cstdint>
 #include <cstring>
 #include <iterator>
 #include <limits>
 #include <memory>
+#include <string>
 #include <tuple>
 #include <type_traits>
 #include <utility>
 
-#include "absl/base/internal/bits.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/optimization.h"
 #include "absl/base/port.h"
+#include "absl/base/prefetch.h"
 #include "absl/container/internal/common.h"
 #include "absl/container/internal/compressed_tuple.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_policy_traits.h"
 #include "absl/container/internal/hashtable_debug_hooks.h"
 #include "absl/container/internal/hashtablez_sampler.h"
-#include "absl/container/internal/have_sse.h"
-#include "absl/container/internal/layout.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
 #include "absl/utility/utility.h"
 
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the backing
+// array and iterators. In the backing array, we store the generation between
+// the control bytes and the slots. When iterators are dereferenced, we assert
+// that the container has not been mutated in a way that could cause iterator
+// invalidation since the iterator was initialized.
+#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#endif
+
+// We use uint8_t so we don't need to worry about padding.
+using GenerationType = uint8_t;
+
+// A sentinel value for empty generations. Using 0 makes it easy to constexpr
+// initialize an array of this value.
+constexpr GenerationType SentinelEmptyGeneration() { return 0; }
+
+constexpr GenerationType NextGeneration(GenerationType generation) {
+  return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
+}
+
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+constexpr bool SwisstableGenerationsEnabled() { return true; }
+constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
+#else
+constexpr bool SwisstableGenerationsEnabled() { return false; }
+constexpr size_t NumGenerationBytes() { return 0; }
+#endif
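
An aside, not part of the patch: a minimal sketch of the `NextGeneration` wrap-around above. Incrementing past the `uint8_t` range skips the sentinel value 0 reserved for empty tables. `GenerationTypeSketch` and `NextGenerationSketch` are illustrative stand-ins, not identifiers from this header.

    #include <cassert>
    #include <cstdint>

    using GenerationTypeSketch = uint8_t;

    // Same idea as NextGeneration(): never yield the empty-table sentinel 0.
    constexpr GenerationTypeSketch NextGenerationSketch(GenerationTypeSketch g) {
      return ++g == 0 ? ++g : g;
    }

    int main() {
      assert(NextGenerationSketch(0) == 1);
      assert(NextGenerationSketch(254) == 255);
      assert(NextGenerationSketch(255) == 1);  // wraps around, skipping 0
      return 0;
    }
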
+
 template <typename AllocType>
 void SwapAlloc(AllocType& lhs, AllocType& rhs,
                std::true_type /* propagate_on_container_swap */) {
@@ -132,14 +265,40 @@
 void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
                std::false_type /* propagate_on_container_swap */) {}
 
+// The state for a probe sequence.
+//
+// Currently, the sequence is a triangular progression of the form
+//
+//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
+//
+// The use of `Width` ensures that each probe step does not overlap groups;
+// the sequence effectively outputs the addresses of *groups* (although not
+// necessarily aligned to any boundary). The `Group` machinery allows us
+// to check an entire group with minimal branching.
+//
+// Wrapping around at `mask + 1` is important, but not for the obvious reason.
+// As described above, the first few entries of the control byte array
+// are mirrored at the end of the array, which `Group` will find and use
+// for selecting candidates. However, when those candidates' slots are
+// actually inspected, there are no corresponding slots for the cloned bytes,
+// so we need to make sure we've treated those offsets as "wrapping around".
+//
+// It turns out that this probe sequence visits every group exactly once if the
+// number of groups is a power of two, since (i^2+i)/2 is a bijection in
+// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
 template <size_t Width>
 class probe_seq {
  public:
+  // Creates a new probe sequence using `hash` as the initial value of the
+  // sequence and `mask` (usually the capacity of the table) as the mask to
+  // apply to each value in the progression.
   probe_seq(size_t hash, size_t mask) {
     assert(((mask + 1) & mask) == 0 && "not a mask");
     mask_ = mask;
     offset_ = hash & mask_;
   }
+
+  // The offset within the table, i.e., the value `p(i)` above.
   size_t offset() const { return offset_; }
   size_t offset(size_t i) const { return (offset_ + i) & mask_; }
 
@@ -148,7 +307,7 @@
     offset_ += index_;
     offset_ &= mask_;
   }
-  // 0-based probe index. The i-th probe in the probe sequence.
+  // 0-based probe index, a multiple of `Width`.
   size_t index() const { return index_; }
 
  private:
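
To make the bijection claim concrete, the following standalone sketch (not code from this header; `kNumGroups` and the starting hash are arbitrary choices) replays the `probe_seq::next()` update for a power-of-two number of groups and checks that every probe lands on a distinct offset.

    #include <cassert>
    #include <cstddef>
    #include <set>

    int main() {
      constexpr size_t kWidth = 16;     // Group::kWidth for the SSE2 group
      constexpr size_t kNumGroups = 8;  // must be a power of two
      constexpr size_t kMask = kWidth * kNumGroups - 1;

      std::set<size_t> offsets;
      size_t offset = size_t{0x9e3779b9} & kMask;  // arbitrary starting hash
      size_t index = 0;
      for (size_t g = 0; g < kNumGroups; ++g) {
        offsets.insert(offset);
        index += kWidth;               // same update as probe_seq::next()
        offset = (offset + index) & kMask;
      }
      // The triangular progression hit a distinct group-sized window each time.
      assert(offsets.size() == kNumGroups);
      return 0;
    }
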
@@ -172,9 +331,9 @@
 
 template <class Policy, class Hash, class Eq, class... Ts>
 struct IsDecomposable<
-    absl::void_t<decltype(
-        Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
-                      std::declval<Ts>()...))>,
+    absl::void_t<decltype(Policy::apply(
+        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+        std::declval<Ts>()...))>,
     Policy, Hash, Eq, Ts...> : std::true_type {};
 
 // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
@@ -189,69 +348,85 @@
 }
 
 template <typename T>
-int TrailingZeros(T x) {
-  return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
-                              static_cast<uint64_t>(x))
-                        : base_internal::CountTrailingZerosNonZero32(
-                              static_cast<uint32_t>(x));
+uint32_t TrailingZeros(T x) {
+  ABSL_ASSUME(x != 0);
+  return static_cast<uint32_t>(countr_zero(x));
 }
 
-template <typename T>
-int LeadingZeros(T x) {
-  return sizeof(T) == 8
-             ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
-             : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
-}
+// An abstract bitmask, such as that emitted by a SIMD instruction.
+//
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+// This mask provides operations for any number of real bits set in an abstract
+// bit. To add iteration on top of that, the implementation must guarantee that
+// no more than one real bit is set in each abstract bit.
+template <class T, int SignificantBits, int Shift = 0>
+class NonIterableBitMask {
+ public:
+  explicit NonIterableBitMask(T mask) : mask_(mask) {}
 
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask.  When Shift=0 (platforms with SSE),
-// this is a true bitmask.  On non-SSE, platforms the arithematic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as
-// either 0x00 or 0x80.
+  explicit operator bool() const { return this->mask_ != 0; }
+
+  // Returns the index of the lowest *abstract* bit set in `self`.
+  uint32_t LowestBitSet() const {
+    return container_internal::TrailingZeros(mask_) >> Shift;
+  }
+
+  // Returns the index of the highest *abstract* bit set in `self`.
+  uint32_t HighestBitSet() const {
+    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+  }
+
+  // Returns the number of trailing zero *abstract* bits.
+  uint32_t TrailingZeros() const {
+    return container_internal::TrailingZeros(mask_) >> Shift;
+  }
+
+  // Returns the number of leading zero *abstract* bits.
+  uint32_t LeadingZeros() const {
+    constexpr int total_significant_bits = SignificantBits << Shift;
+    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+    return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
+  }
+
+  T mask_;
+};
+
+// A mask over which the set abstract bits can be iterated.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
 //
 // For example:
-//   for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
 template <class T, int SignificantBits, int Shift = 0>
-class BitMask {
+class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
+  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
   static_assert(std::is_unsigned<T>::value, "");
   static_assert(Shift == 0 || Shift == 3, "");
 
  public:
-  // These are useful for unit tests (gunit).
+  explicit BitMask(T mask) : Base(mask) {}
+  // BitMask is an iterator over the indices of its abstract bits.
   using value_type = int;
   using iterator = BitMask;
   using const_iterator = BitMask;
 
-  explicit BitMask(T mask) : mask_(mask) {}
   BitMask& operator++() {
-    mask_ &= (mask_ - 1);
+    this->mask_ &= (this->mask_ - 1);
     return *this;
   }
-  explicit operator bool() const { return mask_ != 0; }
-  int operator*() const { return LowestBitSet(); }
-  int LowestBitSet() const {
-    return container_internal::TrailingZeros(mask_) >> Shift;
-  }
-  int HighestBitSet() const {
-    return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
-            1) >>
-           Shift;
-  }
+
+  uint32_t operator*() const { return Base::LowestBitSet(); }
 
   BitMask begin() const { return *this; }
   BitMask end() const { return BitMask(0); }
 
-  int TrailingZeros() const {
-    return container_internal::TrailingZeros(mask_) >> Shift;
-  }
-
-  int LeadingZeros() const {
-    constexpr int total_significant_bits = SignificantBits << Shift;
-    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
-    return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
-  }
-
  private:
   friend bool operator==(const BitMask& a, const BitMask& b) {
     return a.mask_ == b.mask_;
@@ -259,75 +434,137 @@
   friend bool operator!=(const BitMask& a, const BitMask& b) {
     return a.mask_ != b.mask_;
   }
-
-  T mask_;
 };
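
The `Shift == 3` case can be checked in isolation. This sketch (not part of the patch; `Ctz64` is a hand-rolled stand-in for `absl::countr_zero`) mirrors the second documented example, where abstract bits 2 and 3 are set as whole bytes.

    #include <cassert>
    #include <cstdint>

    // Portable trailing-zero count, standing in for absl::countr_zero.
    int Ctz64(uint64_t x) {
      int n = 0;
      while ((x & 1) == 0) {
        x >>= 1;
        ++n;
      }
      return n;
    }

    int main() {
      uint64_t mask = 0x0000000080800000ULL;  // abstract bits 2 and 3 (Shift=3)
      assert((Ctz64(mask) >> 3) == 2);        // LowestBitSet()
      mask &= mask - 1;                       // operator++: clear lowest real bit
      assert((Ctz64(mask) >> 3) == 3);
      mask &= mask - 1;
      assert(mask == 0);                      // iteration is exhausted
      return 0;
    }
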
 
-using ctrl_t = signed char;
 using h2_t = uint8_t;
 
 // The values here are selected for maximum performance. See the static asserts
 // below for details.
-enum Ctrl : ctrl_t {
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+//      empty: 1 0 0 0 0 0 0 0
+//    deleted: 1 1 1 1 1 1 1 0
+//       full: 0 h h h h h h h  // h represents the hash bits.
+//   sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
   kEmpty = -128,   // 0b10000000
   kDeleted = -2,   // 0b11111110
   kSentinel = -1,  // 0b11111111
 };
 static_assert(
-    kEmpty & kDeleted & kSentinel & 0x80,
+    (static_cast<int8_t>(ctrl_t::kEmpty) &
+     static_cast<int8_t>(ctrl_t::kDeleted) &
+     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
     "Special markers need to have the MSB to make checking for them efficient");
-static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
-              "kEmpty and kDeleted must be smaller than kSentinel to make the "
-              "SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(kSentinel == -1,
-              "kSentinel must be -1 to elide loading it from memory into SIMD "
-              "registers (pcmpeqd xmm, xmm)");
-static_assert(kEmpty == -128,
-              "kEmpty must be -128 to make the SIMD check for its "
+static_assert(
+    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+    "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
               "existence efficient (psignb xmm, xmm)");
-static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
-              "kEmpty and kDeleted must share an unset bit that is not shared "
-              "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
-              "efficient");
-static_assert(kDeleted == -2,
-              "kDeleted must be -2 to make the implementation of "
+static_assert(
+    (~static_cast<int8_t>(ctrl_t::kEmpty) &
+     ~static_cast<int8_t>(ctrl_t::kDeleted) &
+     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+    "shared by ctrl_t::kSentinel to make the scalar test for "
+    "MaskEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+              "ctrl_t::kDeleted must be -2 to make the implementation of "
               "ConvertSpecialToEmptyAndFullToDeleted efficient");
 
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
+// See definition comment for why this is size 32.
+ABSL_DLL extern const ctrl_t kEmptyGroup[32];
+
+// Returns a pointer to a control byte group that can be used by empty tables.
 inline ctrl_t* EmptyGroup() {
-  alignas(16) static constexpr ctrl_t empty_group[] = {
-      kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
-      kEmpty,    kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
-  return const_cast<ctrl_t*>(empty_group);
+  // Const must be cast away here; no uses of this function will actually write
+  // to it, because it is only used for empty tables.
+  return const_cast<ctrl_t*>(kEmptyGroup + 16);
+}
+
+// Returns a pointer to a generation to use for an empty hashtable.
+GenerationType* EmptyGeneration();
+
+// Returns whether `generation` is a generation for an empty hashtable that
+// could be returned by EmptyGeneration().
+inline bool IsEmptyGeneration(const GenerationType* generation) {
+  return *generation == SentinelEmptyGeneration();
 }
 
 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
 // randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
 
-// Returns a hash seed.
+// Returns a per-table, hash salt, which changes on resize. This gets mixed into
+// H1 to randomize iteration order per-table.
 //
 // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
 // non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
+inline size_t PerTableSalt(const ctrl_t* ctrl) {
   // The low bits of the pointer have little or no entropy because of
   // alignment. We shift the pointer to try to use higher entropy bits. A
   // good number seems to be 12 bits, because that aligns with page size.
   return reinterpret_cast<uintptr_t>(ctrl) >> 12;
 }
-
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
 inline size_t H1(size_t hash, const ctrl_t* ctrl) {
-  return (hash >> 7) ^ HashSeed(ctrl);
+  return (hash >> 7) ^ PerTableSalt(ctrl);
 }
-inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
 
-inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= 0; }
-inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
-inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
 
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+// Helpers for checking the state of a control byte.
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
+
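
For intuition, a small illustrative sketch of how a 64-bit hash splits into the two pieces used above; the real `H1` additionally mixes in the per-table salt, which is omitted here.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t hash = 0xDEADBEEFCAFEF00DULL;
      const uint64_t h1 = hash >> 7;    // 57 bits: chooses where probing starts
      const uint8_t h2 = hash & 0x7F;   // 7 bits: stored in a full control byte
      std::printf("h1=%llx h2=%02x\n", static_cast<unsigned long long>(h1),
                  static_cast<unsigned>(h2));
      return 0;
    }
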
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
+// * _mm_and_si128:    Ands two i128s together.
+// * _mm_or_si128:     Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+//                   filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128:  Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
+//                      argument if the corresponding lane of the second
+//                      argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+//                      bitmask consisting of those bits.
+// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
+//                      four bits of each i8 lane in the second argument as
+//                      indices.
 
 // https://github.com/abseil/abseil-cpp/issues/209
 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -354,40 +591,42 @@
 
   // Returns a bitmask representing the positions of slots that match hash.
   BitMask<uint32_t, kWidth> Match(h2_t hash) const {
-    auto match = _mm_set1_epi8(hash);
+    auto match = _mm_set1_epi8(static_cast<char>(hash));
     return BitMask<uint32_t, kWidth>(
-        _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+        static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
   }
 
   // Returns a bitmask representing the positions of empty slots.
-  BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-    // This only works because kEmpty is -128.
-    return BitMask<uint32_t, kWidth>(
-        _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+  NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+    // This only works because ctrl_t::kEmpty is -128.
+    return NonIterableBitMask<uint32_t, kWidth>(
+        static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
 #else
-    return Match(static_cast<h2_t>(kEmpty));
+    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
+    return NonIterableBitMask<uint32_t, kWidth>(
+        static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
 #endif
   }
 
   // Returns a bitmask representing the positions of empty or deleted slots.
-  BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
-    auto special = _mm_set1_epi8(kSentinel);
-    return BitMask<uint32_t, kWidth>(
-        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+  NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
+    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
+    return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
   }
 
   // Returns the number of trailing empty or deleted elements in the group.
   uint32_t CountLeadingEmptyOrDeleted() const {
-    auto special = _mm_set1_epi8(kSentinel);
-    return TrailingZeros(
-        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
+    return TrailingZeros(static_cast<uint32_t>(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
   }
 
   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
     auto msbs = _mm_set1_epi8(static_cast<char>(-128));
     auto x126 = _mm_set1_epi8(126);
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
     auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
 #else
     auto zero = _mm_setzero_si128();
@@ -401,6 +640,67 @@
 };
 #endif  // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
 
+#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+struct GroupAArch64Impl {
+  static constexpr size_t kWidth = 8;
+
+  explicit GroupAArch64Impl(const ctrl_t* pos) {
+    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
+  }
+
+  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+    uint8x8_t dup = vdup_n_u8(hash);
+    auto mask = vceq_u8(ctrl, dup);
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    return BitMask<uint64_t, kWidth, 3>(
+        vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
+  }
+
+  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
+    uint64_t mask =
+        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
+                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
+                          vreinterpret_s8_u8(ctrl))),
+                      0);
+    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+  }
+
+  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
+    uint64_t mask =
+        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
+                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+                          vreinterpret_s8_u8(ctrl))),
+                      0);
+    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+  }
+
+  uint32_t CountLeadingEmptyOrDeleted() const {
+    uint64_t mask =
+        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
+                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+                          vreinterpret_s8_u8(ctrl))),
+                      0);
+    // Similar to MaskEmptyOrDeleted(), but the comparison is inverted so that
+    // the resulting bitfield is inverted as well. We then count the number of
+    // trailing zeros.
+    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
+    // so we should be fine.
+    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
+  }
+
+  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    constexpr uint64_t slsbs = 0x0202020202020202ULL;
+    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
+    auto x = slsbs & (mask >> 6);
+    auto res = (x + midbs) | msbs;
+    little_endian::Store64(dst, res);
+  }
+
+  uint8x8_t ctrl;
+};
+#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
+
 struct GroupPortableImpl {
   static constexpr size_t kWidth = 8;
 
@@ -414,7 +714,7 @@
     //
     // Caveat: there are false positives but:
     // - they only occur if there is a real match
-    // - they never occur on kEmpty, kDeleted, kSentinel
+    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
     // - they will be handled gracefully by subsequent checks in code
     //
     // Example:
@@ -427,19 +727,24 @@
     return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
   }
 
-  BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
     constexpr uint64_t msbs = 0x8080808080808080ULL;
-    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+                                                   msbs);
   }
 
-  BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
     constexpr uint64_t msbs = 0x8080808080808080ULL;
-    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+                                                   msbs);
   }
 
   uint32_t CountLeadingEmptyOrDeleted() const {
-    constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
-    return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+    // kDeleted. We clear all other bits and count the number of trailing zeros.
+    constexpr uint64_t bits = 0x0101010101010101ULL;
+    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
+                                 3);
   }
 
   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -453,44 +758,334 @@
   uint64_t ctrl;
 };
 
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
 using Group = GroupSse2Impl;
+#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+using Group = GroupAArch64Impl;
 #else
 using Group = GroupPortableImpl;
 #endif
 
+// When there is an insertion with no reserved growth, we rehash with
+// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
+// constant divided by capacity ensures that inserting N elements is still O(N)
+// in the average case. Using the constant 16 means that we expect to rehash ~8
+// times more often than when generations are disabled. The expected number of
+// extra rehashes per capacity growth is rehash_probability * #insertions =
+// (16 / capacity) * ((7/8 - 7/16) * capacity) = 7.
+inline size_t RehashProbabilityConstant() { return 16; }
+
+class CommonFieldsGenerationInfoEnabled {
+  // A sentinel value for reserved_growth_ indicating that we just ran out of
+  // reserved growth on the last insertion. When reserve is called and then
+  // insertions take place, reserved_growth_'s state machine is N, ..., 1,
+  // kReservedGrowthJustRanOut, 0.
+  static constexpr size_t kReservedGrowthJustRanOut =
+      (std::numeric_limits<size_t>::max)();
+
+ public:
+  CommonFieldsGenerationInfoEnabled() = default;
+  CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
+      : reserved_growth_(that.reserved_growth_),
+        reservation_size_(that.reservation_size_),
+        generation_(that.generation_) {
+    that.reserved_growth_ = 0;
+    that.reservation_size_ = 0;
+    that.generation_ = EmptyGeneration();
+  }
+  CommonFieldsGenerationInfoEnabled& operator=(
+      CommonFieldsGenerationInfoEnabled&&) = default;
+
+  // Whether we should rehash on insert in order to detect bugs of using invalid
+  // references. We rehash on the first insertion after reserved_growth_ reaches
+  // 0 after a call to reserve. We also do a rehash with low probability
+  // whenever reserved_growth_ is zero.
+  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+                                                 size_t capacity) const;
+  void maybe_increment_generation_on_insert() {
+    if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
+
+    if (reserved_growth_ > 0) {
+      if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
+    } else {
+      *generation_ = NextGeneration(*generation_);
+    }
+  }
+  void reset_reserved_growth(size_t reservation, size_t size) {
+    reserved_growth_ = reservation - size;
+  }
+  size_t reserved_growth() const { return reserved_growth_; }
+  void set_reserved_growth(size_t r) { reserved_growth_ = r; }
+  size_t reservation_size() const { return reservation_size_; }
+  void set_reservation_size(size_t r) { reservation_size_ = r; }
+  GenerationType generation() const { return *generation_; }
+  void set_generation(GenerationType g) { *generation_ = g; }
+  GenerationType* generation_ptr() const { return generation_; }
+  void set_generation_ptr(GenerationType* g) { generation_ = g; }
+
+ private:
+  // The number of insertions remaining that are guaranteed to not rehash due to
+  // a prior call to reserve. Note: we store reserved growth in addition to
+  // reservation size because calls to erase() decrease size_ but don't decrease
+  // reserved growth.
+  size_t reserved_growth_ = 0;
+  // The maximum argument to reserve() since the container was cleared. We need
+  // to keep track of this, in addition to reserved growth, because we reset
+  // reserved growth to this when erase(begin(), end()) is called.
+  size_t reservation_size_ = 0;
+  // Pointer to the generation counter, which is used to validate iterators and
+  // is stored in the backing array between the control bytes and the slots.
+  // Note that we can't store the generation inside the container itself and
+  // keep a pointer to the container in the iterators because iterators must
+  // remain valid when the container is moved.
+  // Note: we could derive this pointer from the control pointer, but it makes
+  // the code more complicated, and there's a benefit in having the sizes of
+  // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
+  // which is that tests are less likely to rely on the size remaining the same.
+  GenerationType* generation_ = EmptyGeneration();
+};
+
+class CommonFieldsGenerationInfoDisabled {
+ public:
+  CommonFieldsGenerationInfoDisabled() = default;
+  CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
+      default;
+  CommonFieldsGenerationInfoDisabled& operator=(
+      CommonFieldsGenerationInfoDisabled&&) = default;
+
+  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+    return false;
+  }
+  void maybe_increment_generation_on_insert() {}
+  void reset_reserved_growth(size_t, size_t) {}
+  size_t reserved_growth() const { return 0; }
+  void set_reserved_growth(size_t) {}
+  size_t reservation_size() const { return 0; }
+  void set_reservation_size(size_t) {}
+  GenerationType generation() const { return 0; }
+  void set_generation(GenerationType) {}
+  GenerationType* generation_ptr() const { return nullptr; }
+  void set_generation_ptr(GenerationType*) {}
+};
+
+class HashSetIteratorGenerationInfoEnabled {
+ public:
+  HashSetIteratorGenerationInfoEnabled() = default;
+  explicit HashSetIteratorGenerationInfoEnabled(
+      const GenerationType* generation_ptr)
+      : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
+
+  GenerationType generation() const { return generation_; }
+  void reset_generation() { generation_ = *generation_ptr_; }
+  const GenerationType* generation_ptr() const { return generation_ptr_; }
+  void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
+
+ private:
+  const GenerationType* generation_ptr_ = EmptyGeneration();
+  GenerationType generation_ = *generation_ptr_;
+};
+
+class HashSetIteratorGenerationInfoDisabled {
+ public:
+  HashSetIteratorGenerationInfoDisabled() = default;
+  explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
+
+  GenerationType generation() const { return 0; }
+  void reset_generation() {}
+  const GenerationType* generation_ptr() const { return nullptr; }
+  void set_generation_ptr(const GenerationType*) {}
+};
+
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
+using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
+#else
+using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
+using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
+#endif
+
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// Returns the offset of the control bytes from the start of the backing
+// allocation; growth_left is stored at the very beginning of the backing array.
+inline size_t ControlOffset() { return sizeof(size_t); }
+
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) of the generation counter (if it exists).
+inline size_t GenerationOffset(size_t capacity) {
+  assert(IsValidCapacity(capacity));
+  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+  return ControlOffset() + num_control_bytes;
+}
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+  assert(IsValidCapacity(capacity));
+  return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
+         (~slot_align + 1);
+}
+
+// Given the capacity of a table, computes the total size of the backing
+// array.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+  return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
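
As a worked example of these offset helpers (a sketch under stated assumptions: 64-bit `size_t`, SSE2 group width 16 so `NumClonedBytes()` is 15, generations disabled, capacity 15, and 8-byte slots with 8-byte alignment), the arithmetic below re-derives the layout by hand; it is not code from this header.

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t capacity = 15, slot_size = 8, slot_align = 8;
      const size_t num_cloned = 16 - 1;                // NumClonedBytes()
      const size_t control_offset = sizeof(size_t);    // growth_left comes first
      const size_t generation_offset =
          control_offset + capacity + 1 + num_cloned;  // 8 + 31 == 39
      const size_t slot_offset =                       // round up to slot_align
          (generation_offset + 0 + slot_align - 1) & (~slot_align + 1);
      const size_t alloc_size = slot_offset + capacity * slot_size;
      assert(slot_offset == 40);
      assert(alloc_size == 160);
      return 0;
    }
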
+
+// CommonFields hold the fields in raw_hash_set that do not depend
+// on template parameters. This allows us to conveniently pass all
+// of this state to helper functions as a single argument.
+class CommonFields : public CommonFieldsGenerationInfo {
+ public:
+  CommonFields() = default;
+
+  // Not copyable
+  CommonFields(const CommonFields&) = delete;
+  CommonFields& operator=(const CommonFields&) = delete;
+
+  // Movable
+  CommonFields(CommonFields&& that)
+      : CommonFieldsGenerationInfo(
+            std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
+        // Explicitly copying fields into "this" and then resetting "that"
+        // fields generates less code than calling absl::exchange per field.
+        control_(that.control()),
+        slots_(that.slot_array()),
+        capacity_(that.capacity()),
+        compressed_tuple_(that.size(), std::move(that.infoz())) {
+    that.set_control(EmptyGroup());
+    that.set_slots(nullptr);
+    that.set_capacity(0);
+    that.set_size(0);
+  }
+  CommonFields& operator=(CommonFields&&) = default;
+
+  ctrl_t* control() const { return control_; }
+  void set_control(ctrl_t* c) { control_ = c; }
+  void* backing_array_start() const {
+    // growth_left is stored before control bytes.
+    assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
+    return control() - sizeof(size_t);
+  }
+
+  // Note: we can't use slots() because Qt defines "slots" as a macro.
+  void* slot_array() const { return slots_; }
+  void set_slots(void* s) { slots_ = s; }
+
+  // The number of filled slots.
+  size_t size() const { return compressed_tuple_.template get<0>(); }
+  void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
+
+  // The total number of available slots.
+  size_t capacity() const { return capacity_; }
+  void set_capacity(size_t c) {
+    assert(c == 0 || IsValidCapacity(c));
+    capacity_ = c;
+  }
+
+  // The number of slots we can still fill without needing to rehash.
+  // This is stored in the heap allocation before the control bytes.
+  size_t growth_left() const {
+    return *reinterpret_cast<size_t*>(backing_array_start());
+  }
+  void set_growth_left(size_t gl) {
+    *reinterpret_cast<size_t*>(backing_array_start()) = gl;
+  }
+
+  HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
+  const HashtablezInfoHandle& infoz() const {
+    return compressed_tuple_.template get<1>();
+  }
+
+  bool should_rehash_for_bug_detection_on_insert() const {
+    return CommonFieldsGenerationInfo::
+        should_rehash_for_bug_detection_on_insert(control(), capacity());
+  }
+  void reset_reserved_growth(size_t reservation) {
+    CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
+  }
+
+  // The size of the backing array allocation.
+  size_t alloc_size(size_t slot_size, size_t slot_align) const {
+    return AllocSize(capacity(), slot_size, slot_align);
+  }
+
+  // Returns the number of control bytes set to kDeleted. For testing only.
+  size_t TombstonesCount() const {
+    return static_cast<size_t>(
+        std::count(control(), control() + capacity(), ctrl_t::kDeleted));
+  }
+
+ private:
+  // TODO(b/259599413): Investigate removing some of these fields:
+  // - control/slots can be derived from each other
+  // - we can use 6 bits for capacity since it's always a power of two minus 1
+
+  // The control bytes (and, also, a pointer near to the base of the backing
+  // array).
+  //
+  // This contains `capacity + 1 + NumClonedBytes()` entries, even
+  // when the table is empty (hence EmptyGroup).
+  //
+  // Note that growth_left is stored immediately before this pointer.
+  ctrl_t* control_ = EmptyGroup();
+
+  // The beginning of the slots, located at `SlotOffset()` bytes after
+  // `control`. May be null for empty tables.
+  void* slots_ = nullptr;
+
+  size_t capacity_ = 0;
+
+  // Bundle together size and HashtablezInfoHandle to ensure EBO for
+  // HashtablezInfoHandle when sampling is turned off.
+  absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
+      compressed_tuple_{0u, HashtablezInfoHandle{}};
+};
+
 template <class Policy, class Hash, class Eq, class Alloc>
 class raw_hash_set;
 
-inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+// Returns the next valid capacity after `n`.
+inline size_t NextCapacity(size_t n) {
+  assert(IsValidCapacity(n) || n == 0);
+  return n * 2 + 1;
+}
 
+// Applies the following mapping to every byte in the control array:
+//   * kDeleted -> kEmpty
+//   * kEmpty -> kEmpty
+//   * _ -> kDeleted
 // PRECONDITION:
 //   IsValidCapacity(capacity)
-//   ctrl[capacity] == kSentinel
-//   ctrl[i] != kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-//   DELETED -> EMPTY
-//   EMPTY -> EMPTY
-//   FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
-    ctrl_t* ctrl, size_t capacity) {
-  assert(ctrl[capacity] == kSentinel);
-  assert(IsValidCapacity(capacity));
-  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
-    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
-  }
-  // Copy the cloned ctrl bytes.
-  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
-  ctrl[capacity] = kSentinel;
-}
+//   ctrl[capacity] == ctrl_t::kSentinel
+//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
 
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
 inline size_t NormalizeCapacity(size_t n) {
-  return n ? ~size_t{} >> LeadingZeros(n) : 1;
+  return n ? ~size_t{} >> countl_zero(n) : 1;
 }
 
-// We use 7/8th as maximum load factor.
-// For 16-wide groups, that gives an average of two empty slots per group.
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+//   average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+//   never need to probe (the whole table fits in one group) so we don't need a
+//   load factor less than 1.
+
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
 inline size_t CapacityToGrowth(size_t capacity) {
   assert(IsValidCapacity(capacity));
   // `capacity*7/8`
@@ -500,8 +1095,12 @@
   }
   return capacity - capacity / 8;
 }
-// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and required NormalizeCapacity().
+
+// Given `growth`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// This might not be a valid capacity and `NormalizeCapacity()` should be
+// called on this.
 inline size_t GrowthToLowerboundCapacity(size_t growth) {
   // `growth*8/7`
   if (Group::kWidth == 8 && growth == 7) {
@@ -511,18 +1110,371 @@
   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
 }
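
A quick numeric check of the two helpers above, as a standalone sketch assuming `Group::kWidth == 16`; the `*Example` functions are local copies for illustration, not identifiers from this header.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    size_t CapacityToGrowthExample(size_t capacity) {
      return capacity - capacity / 8;  // 7/8 load factor for 16-wide groups
    }

    size_t GrowthToLowerboundCapacityExample(size_t growth) {
      return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
    }

    int main() {
      assert(CapacityToGrowthExample(15) == 14);
      assert(GrowthToLowerboundCapacityExample(14) == 15);  // round-trips
      assert(CapacityToGrowthExample(63) == 56);
      assert(GrowthToLowerboundCapacityExample(56) == 63);
      return 0;
    }
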
 
-inline void AssertIsFull(ctrl_t* ctrl) {
-  ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
-                        "Invalid operation on iterator. The element might have "
-                        "been erased, or the table might have rehashed.");
+template <class InputIter>
+size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
+                                     size_t bucket_count) {
+  if (bucket_count != 0) {
+    return bucket_count;
+  }
+  using InputIterCategory =
+      typename std::iterator_traits<InputIter>::iterator_category;
+  if (std::is_base_of<std::random_access_iterator_tag,
+                      InputIterCategory>::value) {
+    return GrowthToLowerboundCapacity(
+        static_cast<size_t>(std::distance(first, last)));
+  }
+  return 0;
 }
 
-inline void AssertIsValid(ctrl_t* ctrl) {
-  ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
-                        "Invalid operation on iterator. The element might have "
-                        "been erased, or the table might have rehashed.");
+constexpr bool SwisstableDebugEnabled() {
+#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
+    ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
+  return true;
+#else
+  return false;
+#endif
 }
 
+inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
+                         const GenerationType* generation_ptr,
+                         const char* operation) {
+  if (!SwisstableDebugEnabled()) return;
+  if (ctrl == nullptr) {
+    ABSL_INTERNAL_LOG(FATAL,
+                      std::string(operation) + " called on end() iterator.");
+  }
+  if (ctrl == EmptyGroup()) {
+    ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
+                                 " called on default-constructed iterator.");
+  }
+  if (SwisstableGenerationsEnabled()) {
+    if (generation != *generation_ptr) {
+      ABSL_INTERNAL_LOG(FATAL,
+                        std::string(operation) +
+                            " called on invalid iterator. The table could have "
+                            "rehashed since this iterator was initialized.");
+    }
+    if (!IsFull(*ctrl)) {
+      ABSL_INTERNAL_LOG(
+          FATAL,
+          std::string(operation) +
+              " called on invalid iterator. The element was likely erased.");
+    }
+  } else {
+    if (!IsFull(*ctrl)) {
+      ABSL_INTERNAL_LOG(
+          FATAL,
+          std::string(operation) +
+              " called on invalid iterator. The element might have been erased "
+              "or the table might have rehashed. Consider running with "
+              "--config=asan to diagnose rehashing issues.");
+    }
+  }
+}
+
+// Note that for comparisons, null/end iterators are valid.
+inline void AssertIsValidForComparison(const ctrl_t* ctrl,
+                                       GenerationType generation,
+                                       const GenerationType* generation_ptr) {
+  if (!SwisstableDebugEnabled()) return;
+  const bool ctrl_is_valid_for_comparison =
+      ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
+  if (SwisstableGenerationsEnabled()) {
+    if (generation != *generation_ptr) {
+      ABSL_INTERNAL_LOG(FATAL,
+                        "Invalid iterator comparison. The table could have "
+                        "rehashed since this iterator was initialized.");
+    }
+    if (!ctrl_is_valid_for_comparison) {
+      ABSL_INTERNAL_LOG(
+          FATAL, "Invalid iterator comparison. The element was likely erased.");
+    }
+  } else {
+    ABSL_HARDENING_ASSERT(
+        ctrl_is_valid_for_comparison &&
+        "Invalid iterator comparison. The element might have been erased or "
+        "the table might have rehashed. Consider running with --config=asan to "
+        "diagnose rehashing issues.");
+  }
+}
+
+// If the two iterators come from the same container, then their pointers will
+// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
+// Note: we take slots by reference so that it's not UB if they're uninitialized
+// as long as we don't read them (when ctrl is null).
+inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
+                                      const ctrl_t* ctrl_b,
+                                      const void* const& slot_a,
+                                      const void* const& slot_b) {
+  // If either control byte is null, then we can't tell.
+  if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
+  const void* low_slot = slot_a;
+  const void* hi_slot = slot_b;
+  if (ctrl_a > ctrl_b) {
+    std::swap(ctrl_a, ctrl_b);
+    std::swap(low_slot, hi_slot);
+  }
+  return ctrl_b < low_slot && low_slot <= hi_slot;
+}
+
+// Asserts that two iterators come from the same container.
+// Note: we take slots by reference so that it's not UB if they're uninitialized
+// as long as we don't read them (when ctrl is null).
+inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
+                                const void* const& slot_a,
+                                const void* const& slot_b,
+                                const GenerationType* generation_ptr_a,
+                                const GenerationType* generation_ptr_b) {
+  if (!SwisstableDebugEnabled()) return;
+  const bool a_is_default = ctrl_a == EmptyGroup();
+  const bool b_is_default = ctrl_b == EmptyGroup();
+  if (a_is_default != b_is_default) {
+    ABSL_INTERNAL_LOG(
+        FATAL,
+        "Invalid iterator comparison. Comparing default-constructed iterator "
+        "with non-default-constructed iterator.");
+  }
+  if (a_is_default && b_is_default) return;
+
+  if (SwisstableGenerationsEnabled()) {
+    if (generation_ptr_a == generation_ptr_b) return;
+    const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
+    const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
+    if (a_is_empty != b_is_empty) {
+      ABSL_INTERNAL_LOG(FATAL,
+                        "Invalid iterator comparison. Comparing iterator from "
+                        "a non-empty hashtable with an iterator from an empty "
+                        "hashtable.");
+    }
+    if (a_is_empty && b_is_empty) {
+      ABSL_INTERNAL_LOG(FATAL,
+                        "Invalid iterator comparison. Comparing iterators from "
+                        "different empty hashtables.");
+    }
+    const bool a_is_end = ctrl_a == nullptr;
+    const bool b_is_end = ctrl_b == nullptr;
+    if (a_is_end || b_is_end) {
+      ABSL_INTERNAL_LOG(FATAL,
+                        "Invalid iterator comparison. Comparing iterator with "
+                        "an end() iterator from a different hashtable.");
+    }
+    ABSL_INTERNAL_LOG(FATAL,
+                      "Invalid iterator comparison. Comparing non-end() "
+                      "iterators from different hashtables.");
+  } else {
+    ABSL_HARDENING_ASSERT(
+        AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
+        "Invalid iterator comparison. The iterators may be from different "
+        "containers or the container might have rehashed. Consider running "
+        "with --config=asan to diagnose rehashing issues.");
+  }
+}
+
+struct FindInfo {
+  size_t offset;
+  size_t probe_length;
+};
+
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
+//
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account in
+// `find_first_non_full()`, where we never try `ShouldInsertBackwards()`
+// for small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+// Begins a probing operation on `common.control`, using `hash`.
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+                                      size_t hash) {
+  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
+  return probe(common.control(), common.capacity(), hash);
+}
+
+// Probes an array of control bits using a probe sequence derived from `hash`,
+// and returns the offset corresponding to the first deleted or empty slot.
+//
+// Behavior when the entire table is full is undefined.
+//
+// NOTE: this function must work with tables having both empty and deleted
+// slots in the same group. Such tables appear during `erase()`.
+template <typename = void>
+inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
+  auto seq = probe(common, hash);
+  const ctrl_t* ctrl = common.control();
+  while (true) {
+    Group g{ctrl + seq.offset()};
+    auto mask = g.MaskEmptyOrDeleted();
+    if (mask) {
+#if !defined(NDEBUG)
+      // We want to add entropy even when ASLR is not enabled.
+      // In debug build we will randomly insert in either the front or back of
+      // the group.
+      // TODO(kfm,sbenza): revisit after we do unconditional mixing
+      if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
+        return {seq.offset(mask.HighestBitSet()), seq.index()};
+      }
+#endif
+      return {seq.offset(mask.LowestBitSet()), seq.index()};
+    }
+    seq.next();
+    assert(seq.index() <= common.capacity() && "full table!");
+  }
+}
+
+// The extern template declaration for this inline function keeps the
+// possibility of inlining. When the compiler decides not to inline it, no
+// symbols are added to the corresponding translation unit.
+extern template FindInfo find_first_non_full(const CommonFields&, size_t);
+
+// Non-inlined version of find_first_non_full for use in less
+// performance critical routines.
+FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
+
+inline void ResetGrowthLeft(CommonFields& common) {
+  common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
+}
+
+// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
+// array as empty.
+inline void ResetCtrl(CommonFields& common, size_t slot_size) {
+  const size_t capacity = common.capacity();
+  ctrl_t* ctrl = common.control();
+  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+              capacity + 1 + NumClonedBytes());
+  ctrl[capacity] = ctrl_t::kSentinel;
+  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
+  ResetGrowthLeft(common);
+}
+
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
+inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
+                    size_t slot_size) {
+  const size_t capacity = common.capacity();
+  assert(i < capacity);
+
+  auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
+  if (IsFull(h)) {
+    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
+  } else {
+    SanitizerPoisonMemoryRegion(slot_i, slot_size);
+  }
+
+  ctrl_t* ctrl = common.control();
+  ctrl[i] = h;
+  ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
+}
+
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
+                    size_t slot_size) {
+  SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
+}
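
The cloned-tail index expression in `SetCtrl` can be verified with a small sketch (illustrative only; it assumes `Group::kWidth == 16`, so `NumClonedBytes()` is 15, and a capacity of 15).

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t capacity = 15, num_cloned = 15;
      auto mirror = [&](size_t i) {
        return ((i - num_cloned) & capacity) + (num_cloned & capacity);
      };
      assert(mirror(0) == 16);   // ctrl[0] is cloned just past the sentinel
      assert(mirror(5) == 21);   // in general, ctrl[i] clones to capacity + 1 + i
      assert(mirror(14) == 30);  // the last cloned byte
      return 0;
    }
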
+
+// growth_left (which is a size_t) is stored with the backing array.
+constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
+  return (std::max)(align_of_slot, alignof(size_t));
+}
+
+template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
+  assert(c.capacity());
+  // Folks with custom allocators often make unwarranted assumptions about the
+  // behavior of their classes vis-a-vis trivial destructability and what
+  // calls they will or won't make.  Avoid sampling for people with custom
+  // allocators to get us out of this mess.  This is not a hard guarantee but
+  // a workaround while we plan the exact guarantee we want to provide.
+  const size_t sample_size =
+      (std::is_same<Alloc, std::allocator<char>>::value &&
+       c.slot_array() == nullptr)
+          ? SizeOfSlot
+          : 0;
+
+  const size_t cap = c.capacity();
+  const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
+  // growth_left (which is a size_t) is stored with the backing array.
+  char* mem = static_cast<char*>(
+      Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
+  const GenerationType old_generation = c.generation();
+  c.set_generation_ptr(
+      reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
+  c.set_generation(NextGeneration(old_generation));
+  c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
+  c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
+  ResetCtrl(c, SizeOfSlot);
+  if (sample_size) {
+    c.infoz() = Sample(sample_size);
+  }
+  c.infoz().RecordStorageChanged(c.size(), cap);
+}
+
+// PolicyFunctions bundles together some information for a particular
+// raw_hash_set<T, ...> instantiation. This information is passed to
+// type-erased functions that want to do small amounts of type-specific
+// work.
+struct PolicyFunctions {
+  size_t slot_size;
+
+  // Returns the hash of the pointed-to slot.
+  size_t (*hash_slot)(void* set, void* slot);
+
+  // Transfer the contents of src_slot to dst_slot.
+  void (*transfer)(void* set, void* dst_slot, void* src_slot);
+
+  // Deallocate the backing store from common.
+  void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
+};
+
+// ClearBackingArray clears the backing array, either modifying it in place or
+// creating a new one, depending on the value of "reuse".
+// REQUIRES: c.capacity > 0
+void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
+                       bool reuse);
+
+// Type-erased version of raw_hash_set::erase_meta_only.
+void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
+
+// Function to place in PolicyFunctions::dealloc for raw_hash_sets
+// that are using std::allocator. This allows us to share the same
+// function body for raw_hash_set instantiations that have the
+// same slot alignment.
+template <size_t AlignOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
+                                                const PolicyFunctions& policy) {
+  // Unpoison before returning the memory to the allocator.
+  SanitizerUnpoisonMemoryRegion(common.slot_array(),
+                                policy.slot_size * common.capacity());
+
+  std::allocator<char> alloc;
+  Deallocate<BackingArrayAlignment(AlignOfSlot)>(
+      &alloc, common.backing_array_start(),
+      common.alloc_size(policy.slot_size, AlignOfSlot));
+}
+
+// For trivially relocatable types we use memcpy directly. This allows us to
+// share the same function body for raw_hash_set instantiations that have the
+// same slot size as long as they are relocatable.
+template <size_t SizeOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
+  memcpy(dst, src, SizeOfSlot);
+}
+
+// Type-erased version of raw_hash_set::drop_deletes_without_resize.
+void DropDeletesWithoutResize(CommonFields& common,
+                              const PolicyFunctions& policy, void* tmp_space);
+
+// A SwissTable.
+//
 // Policy: a policy defines how to perform different operations on
 // the slots of the hashtable (see hash_policy_traits.h for the full interface
 // of policy).
@@ -579,13 +1531,6 @@
   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
 
-  using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
-
-  static Layout MakeLayout(size_t capacity) {
-    assert(IsValidCapacity(capacity));
-    return Layout(capacity + Group::kWidth + 1, capacity);
-  }
-
   using AllocTraits = absl::allocator_traits<allocator_type>;
   using SlotAlloc = typename absl::allocator_traits<
       allocator_type>::template rebind_alloc<slot_type>;
@@ -628,7 +1573,7 @@
   static_assert(std::is_same<const_pointer, const value_type*>::value,
                 "Allocators with custom pointer types are not supported");
 
-  class iterator {
+  class iterator : private HashSetIteratorGenerationInfo {
     friend class raw_hash_set;
 
    public:
@@ -644,16 +1589,19 @@
 
     // PRECONDITION: not an end() iterator.
     reference operator*() const {
-      AssertIsFull(ctrl_);
+      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
       return PolicyTraits::element(slot_);
     }
 
     // PRECONDITION: not an end() iterator.
-    pointer operator->() const { return &operator*(); }
+    pointer operator->() const {
+      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
+      return &operator*();
+    }
 
     // PRECONDITION: not an end() iterator.
     iterator& operator++() {
-      AssertIsFull(ctrl_);
+      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
       ++ctrl_;
       ++slot_;
       skip_empty_or_deleted();
@@ -667,8 +1615,10 @@
     }
 
     friend bool operator==(const iterator& a, const iterator& b) {
-      AssertIsValid(a.ctrl_);
-      AssertIsValid(b.ctrl_);
+      AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
+      AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
+      AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
+                          a.generation_ptr(), b.generation_ptr());
       return a.ctrl_ == b.ctrl_;
     }
     friend bool operator!=(const iterator& a, const iterator& b) {
@@ -676,22 +1626,35 @@
     }
 
    private:
-    iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+    iterator(ctrl_t* ctrl, slot_type* slot,
+             const GenerationType* generation_ptr)
+        : HashSetIteratorGenerationInfo(generation_ptr),
+          ctrl_(ctrl),
+          slot_(slot) {
       // This assumption helps the compiler know that any non-end iterator is
       // not equal to any end iterator.
-      ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+      ABSL_ASSUME(ctrl != nullptr);
     }
+    // For end() iterators.
+    explicit iterator(const GenerationType* generation_ptr)
+        : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
 
+    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
+    // until they reach one.
+    //
+    // If a sentinel is reached, we null `ctrl_` out instead.
     void skip_empty_or_deleted() {
       while (IsEmptyOrDeleted(*ctrl_)) {
         uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
         ctrl_ += shift;
         slot_ += shift;
       }
-      if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
+      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
     }
 
-    ctrl_t* ctrl_ = nullptr;
+    // We use EmptyGroup() for default-constructed iterators so that they can
+    // be distinguished from end iterators, which have nullptr ctrl_.
+    ctrl_t* ctrl_ = EmptyGroup();
     // To avoid uninitialized member warnings, put slot_ in an anonymous union.
     // The member is not initialized on singleton and end iterators.
     union {
@@ -709,9 +1672,9 @@
     using pointer = typename raw_hash_set::const_pointer;
     using difference_type = typename raw_hash_set::difference_type;
 
-    const_iterator() {}
+    const_iterator() = default;
     // Implicit construction from iterator.
-    const_iterator(iterator i) : inner_(std::move(i)) {}
+    const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
 
     reference operator*() const { return *inner_; }
     pointer operator->() const { return inner_.operator->(); }
@@ -730,8 +1693,10 @@
     }
 
    private:
-    const_iterator(const ctrl_t* ctrl, const slot_type* slot)
-        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+    const_iterator(const ctrl_t* ctrl, const slot_type* slot,
+                   const GenerationType* gen)
+        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
+    }
 
     iterator inner_;
   };
@@ -739,18 +1704,20 @@
   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
   using insert_return_type = InsertReturnType<iterator, node_type>;
 
+  // Note: can't use `= default` due to non-default noexcept (causes
+  // problems for some compilers). NOLINTNEXTLINE
   raw_hash_set() noexcept(
-      std::is_nothrow_default_constructible<hasher>::value&&
-          std::is_nothrow_default_constructible<key_equal>::value&&
-              std::is_nothrow_default_constructible<allocator_type>::value) {}
+      std::is_nothrow_default_constructible<hasher>::value &&
+      std::is_nothrow_default_constructible<key_equal>::value &&
+      std::is_nothrow_default_constructible<allocator_type>::value) {}
 
-  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
-                        const key_equal& eq = key_equal(),
-                        const allocator_type& alloc = allocator_type())
-      : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+  ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
+      size_t bucket_count, const hasher& hash = hasher(),
+      const key_equal& eq = key_equal(),
+      const allocator_type& alloc = allocator_type())
+      : settings_(CommonFields{}, hash, eq, alloc) {
     if (bucket_count) {
-      capacity_ = NormalizeCapacity(bucket_count);
-      reset_growth_left();
+      common().set_capacity(NormalizeCapacity(bucket_count));
       initialize_slots();
     }
   }
@@ -769,7 +1736,8 @@
   raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
                const allocator_type& alloc = allocator_type())
-      : raw_hash_set(bucket_count, hash, eq, alloc) {
+      : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
+                     hash, eq, alloc) {
     insert(first, last);
   }
 
@@ -851,50 +1819,37 @@
 
   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
       : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
-    reserve(that.size());
+    const size_t size = that.size();
+    if (size == 0) return;
+    reserve(size);
     // Because the table is guaranteed to be empty, we can do something faster
     // than a full `insert`.
     for (const auto& v : that) {
       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
-      auto target = find_first_non_full(hash);
-      set_ctrl(target.offset, H2(hash));
+      auto target = find_first_non_full_outofline(common(), hash);
+      SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
       emplace_at(target.offset, v);
-      infoz_.RecordInsert(hash, target.probe_length);
+      common().maybe_increment_generation_on_insert();
+      infoz().RecordInsert(hash, target.probe_length);
     }
-    size_ = that.size();
-    growth_left() -= that.size();
+    common().set_size(size);
+    set_growth_left(growth_left() - size);
   }
 
-  raw_hash_set(raw_hash_set&& that) noexcept(
-      std::is_nothrow_copy_constructible<hasher>::value&&
-          std::is_nothrow_copy_constructible<key_equal>::value&&
-              std::is_nothrow_copy_constructible<allocator_type>::value)
-      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
-        slots_(absl::exchange(that.slots_, nullptr)),
-        size_(absl::exchange(that.size_, 0)),
-        capacity_(absl::exchange(that.capacity_, 0)),
-        infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
-        // Hash, equality and allocator are copied instead of moved because
-        // `that` must be left valid. If Hash is std::function<Key>, moving it
-        // would create a nullptr functor that cannot be called.
-        settings_(that.settings_) {
-    // growth_left was copied above, reset the one from `that`.
-    that.growth_left() = 0;
-  }
+  ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
+      std::is_nothrow_copy_constructible<hasher>::value &&
+      std::is_nothrow_copy_constructible<key_equal>::value &&
+      std::is_nothrow_copy_constructible<allocator_type>::value)
+      :  // Hash, equality and allocator are copied instead of moved because
+         // `that` must be left valid. If Hash is std::function<Key>, moving it
+         // would create a nullptr functor that cannot be called.
+        settings_(absl::exchange(that.common(), CommonFields{}),
+                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
 
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
-      : ctrl_(EmptyGroup()),
-        slots_(nullptr),
-        size_(0),
-        capacity_(0),
-        settings_(0, that.hash_ref(), that.eq_ref(), a) {
+      : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
     if (a == that.alloc_ref()) {
-      std::swap(ctrl_, that.ctrl_);
-      std::swap(slots_, that.slots_);
-      std::swap(size_, that.size_);
-      std::swap(capacity_, that.capacity_);
-      std::swap(growth_left(), that.growth_left());
-      std::swap(infoz_, that.infoz_);
+      std::swap(common(), that.common());
     } else {
       reserve(that.size());
       // Note: this will copy elements of dense_set and unordered_set instead of
@@ -913,35 +1868,54 @@
   }
 
   raw_hash_set& operator=(raw_hash_set&& that) noexcept(
-      absl::allocator_traits<allocator_type>::is_always_equal::value&&
-          std::is_nothrow_move_assignable<hasher>::value&&
-              std::is_nothrow_move_assignable<key_equal>::value) {
+      absl::allocator_traits<allocator_type>::is_always_equal::value &&
+      std::is_nothrow_move_assignable<hasher>::value &&
+      std::is_nothrow_move_assignable<key_equal>::value) {
     // TODO(sbenza): We should only use the operations from the noexcept clause
     // to make sure we actually adhere to that contract.
+    // NOLINTNEXTLINE: not returning *this for performance.
     return move_assign(
         std::move(that),
         typename AllocTraits::propagate_on_container_move_assignment());
   }
 
-  ~raw_hash_set() { destroy_slots(); }
+  ~raw_hash_set() {
+    const size_t cap = capacity();
+    if (!cap) return;
+    destroy_slots();
 
-  iterator begin() {
+    // Unpoison before returning the memory to the allocator.
+    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
+    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+        &alloc_ref(), common().backing_array_start(),
+        AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
+
+    infoz().Unregister();
+  }
+
+  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = iterator_at(0);
     it.skip_empty_or_deleted();
     return it;
   }
-  iterator end() { return {}; }
+  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return iterator(common().generation_ptr());
+  }
 
-  const_iterator begin() const {
+  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_cast<raw_hash_set*>(this)->begin();
   }
-  const_iterator end() const { return {}; }
-  const_iterator cbegin() const { return begin(); }
-  const_iterator cend() const { return end(); }
+  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return iterator(common().generation_ptr());
+  }
+  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return begin();
+  }
+  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
 
   bool empty() const { return !size(); }
-  size_t size() const { return size_; }
-  size_t capacity() const { return capacity_; }
+  size_t size() const { return common().size(); }
+  size_t capacity() const { return common().capacity(); }
   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
 
   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -952,20 +1926,26 @@
     // compared to destruction of the elements of the container. So we pick the
     // largest bucket_count() threshold for which iteration is still fast and
     // past that we simply deallocate the array.
-    if (capacity_ > 127) {
+    const size_t cap = capacity();
+    if (cap == 0) {
+      // Already guaranteed to be empty, so nothing to do.
+    } else {
       destroy_slots();
-    } else if (capacity_) {
-      for (size_t i = 0; i != capacity_; ++i) {
-        if (IsFull(ctrl_[i])) {
-          PolicyTraits::destroy(&alloc_ref(), slots_ + i);
-        }
-      }
-      size_ = 0;
-      reset_ctrl();
-      reset_growth_left();
+      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
     }
-    assert(empty());
-    infoz_.RecordStorageChanged(0, capacity_);
+    common().set_reserved_growth(0);
+    common().set_reservation_size(0);
+  }
+
+  inline void destroy_slots() {
+    const size_t cap = capacity();
+    const ctrl_t* ctrl = control();
+    slot_type* slot = slot_array();
+    for (size_t i = 0; i != cap; ++i) {
+      if (IsFull(ctrl[i])) {
+        PolicyTraits::destroy(&alloc_ref(), slot + i);
+      }
+    }
   }
 
   // This overload kicks in when the argument is an rvalue of insertable and
@@ -975,11 +1955,10 @@
   //   m.insert(std::make_pair("abc", 42));
   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
   // bug.
-  template <class T, RequiresInsertable<T> = 0,
-            class T2 = T,
+  template <class T, RequiresInsertable<T> = 0, class T2 = T,
             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
             T* = nullptr>
-  std::pair<iterator, bool> insert(T&& value) {
+  std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(std::forward<T>(value));
   }
 
@@ -994,13 +1973,11 @@
   //   const char* p = "hello";
   //   s.insert(p);
   //
-  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
-  // RequiresInsertable<T> with RequiresInsertable<const T&>.
-  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
   template <
-      class T, RequiresInsertable<T> = 0,
+      class T, RequiresInsertable<const T&> = 0,
       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
-  std::pair<iterator, bool> insert(const T& value) {
+  std::pair<iterator, bool> insert(const T& value)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(value);
   }
 
@@ -1009,7 +1986,8 @@
   //
   //   flat_hash_map<std::string, int> s;
   //   s.insert({"abc", 42});
-  std::pair<iterator, bool> insert(init_type&& value) {
+  std::pair<iterator, bool> insert(init_type&& value)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(std::move(value));
   }
 
@@ -1018,27 +1996,26 @@
   template <class T, RequiresInsertable<T> = 0, class T2 = T,
             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
             T* = nullptr>
-  iterator insert(const_iterator, T&& value) {
+  iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert(std::forward<T>(value)).first;
   }
 
-  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
-  // RequiresInsertable<T> with RequiresInsertable<const T&>.
-  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
   template <
-      class T, RequiresInsertable<T> = 0,
+      class T, RequiresInsertable<const T&> = 0,
       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
-  iterator insert(const_iterator, const T& value) {
+  iterator insert(const_iterator,
+                  const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert(value).first;
   }
 
-  iterator insert(const_iterator, init_type&& value) {
+  iterator insert(const_iterator,
+                  init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert(std::move(value)).first;
   }
 
   template <class InputIt>
   void insert(InputIt first, InputIt last) {
-    for (; first != last; ++first) insert(*first);
+    for (; first != last; ++first) emplace(*first);
   }
 
   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1050,7 +2027,7 @@
     insert(ilist.begin(), ilist.end());
   }
 
-  insert_return_type insert(node_type&& node) {
+  insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (!node) return {end(), false, node_type()};
     const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
     auto res = PolicyTraits::apply(
@@ -1064,8 +2041,11 @@
     }
   }
 
-  iterator insert(const_iterator, node_type&& node) {
-    return insert(std::move(node)).first;
+  iterator insert(const_iterator,
+                  node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    auto res = insert(std::move(node));
+    node = std::move(res.node);
+    return res.position;
   }
 
   // This overload kicks in if we can deduce the key from args. This enables us
@@ -1079,7 +2059,8 @@
   //   m.emplace("abc", "xyz");
   template <class... Args, typename std::enable_if<
                                IsDecomposable<Args...>::value, int>::type = 0>
-  std::pair<iterator, bool> emplace(Args&&... args) {
+  std::pair<iterator, bool> emplace(Args&&... args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return PolicyTraits::apply(EmplaceDecomposable{*this},
                                std::forward<Args>(args)...);
   }
@@ -1089,7 +2070,8 @@
   // destroys.
   template <class... Args, typename std::enable_if<
                                !IsDecomposable<Args...>::value, int>::type = 0>
-  std::pair<iterator, bool> emplace(Args&&... args) {
+  std::pair<iterator, bool> emplace(Args&&... args)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
 
@@ -1099,14 +2081,16 @@
   }
 
   template <class... Args>
-  iterator emplace_hint(const_iterator, Args&&... args) {
+  iterator emplace_hint(const_iterator,
+                        Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(std::forward<Args>(args)...).first;
   }
 
   // Extension API: support for lazy emplace.
   //
   // Looks up key in the table. If found, returns the iterator to the element.
-  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
+  // and returns an iterator to the new element.
   //
   // `f` must abide by several restrictions:
   //  - it MUST call `raw_hash_set::constructor` with arguments as if a
@@ -1149,10 +2133,11 @@
   };
 
   template <class K = key_type, class F>
-  iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+  iterator lazy_emplace(const key_arg<K>& key,
+                        F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto res = find_or_prepare_insert(key);
     if (res.second) {
-      slot_type* slot = slots_ + res.first;
+      slot_type* slot = slot_array() + res.first;
       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
       assert(!slot);
     }
@@ -1194,12 +2179,25 @@
   // This overload is necessary because otherwise erase<K>(const K&) would be
   // a better match if non-const iterator is passed as an argument.
   void erase(iterator it) {
-    AssertIsFull(it.ctrl_);
+    AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
     PolicyTraits::destroy(&alloc_ref(), it.slot_);
     erase_meta_only(it);
   }
 
-  iterator erase(const_iterator first, const_iterator last) {
+  iterator erase(const_iterator first,
+                 const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    // We check for empty first because ClearBackingArray requires that
+    // capacity() > 0 as a precondition.
+    if (empty()) return end();
+    if (first == begin() && last == end()) {
+      // TODO(ezb): we access control bytes in destroy_slots so it could make
+      // sense to combine destroy_slots and ClearBackingArray to avoid cache
+      // misses when the table is large. Note that we also do this in clear().
+      destroy_slots();
+      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
+      common().set_reserved_growth(common().reservation_size());
+      return end();
+    }
     while (first != last) {
       erase(first++);
     }
@@ -1228,7 +2226,8 @@
   }
 
   node_type extract(const_iterator position) {
-    AssertIsFull(position.inner_.ctrl_);
+    AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+                 position.inner_.generation_ptr(), "extract()");
     auto node =
         CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
     erase_meta_only(position);
@@ -1248,35 +2247,45 @@
       IsNoThrowSwappable<allocator_type>(
           typename AllocTraits::propagate_on_container_swap{})) {
     using std::swap;
-    swap(ctrl_, that.ctrl_);
-    swap(slots_, that.slots_);
-    swap(size_, that.size_);
-    swap(capacity_, that.capacity_);
-    swap(growth_left(), that.growth_left());
+    swap(common(), that.common());
     swap(hash_ref(), that.hash_ref());
     swap(eq_ref(), that.eq_ref());
-    swap(infoz_, that.infoz_);
     SwapAlloc(alloc_ref(), that.alloc_ref(),
               typename AllocTraits::propagate_on_container_swap{});
   }
 
   void rehash(size_t n) {
-    if (n == 0 && capacity_ == 0) return;
-    if (n == 0 && size_ == 0) {
-      destroy_slots();
-      infoz_.RecordStorageChanged(0, 0);
+    if (n == 0 && capacity() == 0) return;
+    if (n == 0 && size() == 0) {
+      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
       return;
     }
+
     // bitor is a faster way of doing `max` here. We will round up to the next
     // power-of-2-minus-1, so bitor is good enough.
     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
     // n == 0 unconditionally rehashes as per the standard.
-    if (n == 0 || m > capacity_) {
+    if (n == 0 || m > capacity()) {
       resize(m);
+
+      // This is after resize, to ensure that we have completed the allocation
+      // and have potentially sampled the hashtable.
+      infoz().RecordReservation(n);
     }
   }
 
-  void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+  void reserve(size_t n) {
+    if (n > size() + growth_left()) {
+      size_t m = GrowthToLowerboundCapacity(n);
+      resize(NormalizeCapacity(m));
+
+      // This is after resize, to ensure that we have completed the allocation
+      // and have potentially sampled the hashtable.
+      infoz().RecordReservation(n);
+    }
+    common().reset_reserved_growth(n);
+    common().set_reservation_size(n);
+  }
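For context (an illustrative sketch, not part of the patch): the reserved-growth bookkeeping above is what the public Swiss-table containers lean on when callers size a table up front. Assuming absl::flat_hash_set as the wrapper:

  #include "absl/container/flat_hash_set.h"

  void FillReserved() {
    absl::flat_hash_set<int> s;
    // One allocation sized for at least 1000 elements; the subsequent inserts
    // never resize, and the reservation is reported to the sampler via
    // RecordReservation().
    s.reserve(1000);
    for (int i = 0; i < 1000; ++i) s.insert(i);
  }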
 
   // Extension API: support for heterogeneous keys.
   //
@@ -1300,11 +2309,13 @@
   template <class K = key_type>
   void prefetch(const key_arg<K>& key) const {
     (void)key;
-#if defined(__GNUC__)
-    auto seq = probe(hash_ref()(key));
-    __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
-    __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
-#endif  // __GNUC__
+    // Avoid probing if we won't be able to prefetch the addresses received.
+#ifdef ABSL_HAVE_PREFETCH
+    prefetch_heap_block();
+    auto seq = probe(common(), hash_ref()(key));
+    PrefetchToLocalCache(control() + seq.offset());
+    PrefetchToLocalCache(slot_array() + seq.offset());
+#endif  // ABSL_HAVE_PREFETCH
   }
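A brief usage sketch of this extension (illustrative only; it assumes the public flat_hash_map wrapper inherits prefetch(), and Update() is a made-up caller):

  #include "absl/container/flat_hash_map.h"

  void Update(absl::flat_hash_map<int, int>& m, int key) {
    // Issue the prefetch early so the memory access overlaps with the
    // (stand-in) work below instead of stalling the eventual lookup.
    m.prefetch(key);
    int value = key * 2 + 1;
    m[key] = value;
  }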
 
   // The API of find() has two extensions.
@@ -1315,32 +2326,39 @@
   // 2. The type of the key argument doesn't have to be key_type. This is so
   // called heterogeneous key support.
   template <class K = key_type>
-  iterator find(const key_arg<K>& key, size_t hash) {
-    auto seq = probe(hash);
+  iterator find(const key_arg<K>& key,
+                size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    auto seq = probe(common(), hash);
+    slot_type* slot_ptr = slot_array();
+    const ctrl_t* ctrl = control();
     while (true) {
-      Group g{ctrl_ + seq.offset()};
-      for (int i : g.Match(H2(hash))) {
+      Group g{ctrl + seq.offset()};
+      for (uint32_t i : g.Match(H2(hash))) {
         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                 EqualElement<K>{key, eq_ref()},
-                PolicyTraits::element(slots_ + seq.offset(i)))))
+                PolicyTraits::element(slot_ptr + seq.offset(i)))))
           return iterator_at(seq.offset(i));
       }
-      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
       seq.next();
-      assert(seq.index() < capacity_ && "full table!");
+      assert(seq.index() <= capacity() && "full table!");
     }
   }
   template <class K = key_type>
-  iterator find(const key_arg<K>& key) {
+  iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    prefetch_heap_block();
     return find(key, hash_ref()(key));
   }
 
   template <class K = key_type>
-  const_iterator find(const key_arg<K>& key, size_t hash) const {
+  const_iterator find(const key_arg<K>& key,
+                      size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_cast<raw_hash_set*>(this)->find(key, hash);
   }
   template <class K = key_type>
-  const_iterator find(const key_arg<K>& key) const {
+  const_iterator find(const key_arg<K>& key) const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    prefetch_heap_block();
     return find(key, hash_ref()(key));
   }
 
@@ -1350,22 +2368,23 @@
   }
 
   template <class K = key_type>
-  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+  std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = find(key);
     if (it != end()) return {it, std::next(it)};
     return {it, it};
   }
   template <class K = key_type>
   std::pair<const_iterator, const_iterator> equal_range(
-      const key_arg<K>& key) const {
+      const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = find(key);
     if (it != end()) return {it, std::next(it)};
     return {it, it};
   }
 
-  size_t bucket_count() const { return capacity_; }
+  size_t bucket_count() const { return capacity(); }
   float load_factor() const {
-    return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+    return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
   }
   float max_load_factor() const { return 1.0f; }
   void max_load_factor(float) {
@@ -1390,6 +2409,14 @@
     return !(a == b);
   }
 
+  template <typename H>
+  friend typename std::enable_if<H::template is_hashable<value_type>::value,
+                                 H>::type
+  AbslHashValue(H h, const raw_hash_set& s) {
+    return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
+                      s.size());
+  }
+
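For illustration (not part of the patch): combine_unordered is what makes whole-container hashing independent of iteration order. A minimal sketch assuming the flat_hash_set wrapper:

  #include <cassert>
  #include "absl/container/flat_hash_set.h"
  #include "absl/hash/hash.h"

  void HashIsOrderInsensitive() {
    absl::flat_hash_set<int> a = {1, 2, 3};
    absl::flat_hash_set<int> b = {3, 2, 1};
    // Equal sets hash equally, regardless of insertion or iteration order.
    assert(absl::Hash<absl::flat_hash_set<int>>{}(a) ==
           absl::Hash<absl::flat_hash_set<int>>{}(b));
  }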
   friend void swap(raw_hash_set& a,
                    raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
     a.swap(b);
@@ -1444,7 +2471,8 @@
     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
       auto res = s.find_or_prepare_insert(key);
       if (res.second) {
-        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+        PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
+                               &slot);
       } else if (do_destroy) {
         PolicyTraits::destroy(&s.alloc_ref(), &slot);
       }
@@ -1455,235 +2483,145 @@
     slot_type&& slot;
   };
 
-  // "erases" the object from the container, except that it doesn't actually
-  // destroy the object. It only updates all the metadata of the class.
-  // This can be used in conjunction with Policy::transfer to move the object to
-  // another place.
+  // Erases, but does not destroy, the value pointed to by `it`.
+  //
+  // This merely updates the pertinent control byte. This can be used in
+  // conjunction with Policy::transfer to move the object to another place.
   void erase_meta_only(const_iterator it) {
-    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
-    --size_;
-    const size_t index = it.inner_.ctrl_ - ctrl_;
-    const size_t index_before = (index - Group::kWidth) & capacity_;
-    const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
-    const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
-
-    // We count how many consecutive non empties we have to the right and to the
-    // left of `it`. If the sum is >= kWidth then there is at least one probe
-    // window that might have seen a full group.
-    bool was_never_full =
-        empty_before && empty_after &&
-        static_cast<size_t>(empty_after.TrailingZeros() +
-                            empty_before.LeadingZeros()) < Group::kWidth;
-
-    set_ctrl(index, was_never_full ? kEmpty : kDeleted);
-    growth_left() += was_never_full;
-    infoz_.RecordErase();
+    EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
   }
 
-  void initialize_slots() {
-    assert(capacity_);
-    // Folks with custom allocators often make unwarranted assumptions about the
-    // behavior of their classes vis-a-vis trivial destructability and what
-    // calls they will or wont make.  Avoid sampling for people with custom
-    // allocators to get us out of this mess.  This is not a hard guarantee but
-    // a workaround while we plan the exact guarantee we want to provide.
-    //
+  // Allocates a backing array for `self` and initializes its control bytes.
+  // This reads `capacity` and updates all other fields based on the result of
+  // the allocation.
+  //
+  // This does not free the currently held array; `capacity` must be nonzero.
+  inline void initialize_slots() {
     // People are often sloppy with the exact type of their allocator (sometimes
     // it has an extra const or is missing the pair, but rebinds made it work
-    // anyway).  To avoid the ambiguity, we work off SlotAlloc which we have
-    // bound more carefully.
-    if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
-        slots_ == nullptr) {
-      infoz_ = Sample();
-    }
-
-    auto layout = MakeLayout(capacity_);
-    char* mem = static_cast<char*>(
-        Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
-    ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
-    slots_ = layout.template Pointer<1>(mem);
-    reset_ctrl();
-    reset_growth_left();
-    infoz_.RecordStorageChanged(size_, capacity_);
+    // anyway).
+    using CharAlloc =
+        typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
+    InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
+        common(), CharAlloc(alloc_ref()));
   }
 
-  void destroy_slots() {
-    if (!capacity_) return;
-    for (size_t i = 0; i != capacity_; ++i) {
-      if (IsFull(ctrl_[i])) {
-        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
-      }
-    }
-    auto layout = MakeLayout(capacity_);
-    // Unpoison before returning the memory to the allocator.
-    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
-    Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
-    ctrl_ = EmptyGroup();
-    slots_ = nullptr;
-    size_ = 0;
-    capacity_ = 0;
-    growth_left() = 0;
-  }
-
-  void resize(size_t new_capacity) {
+  ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
     assert(IsValidCapacity(new_capacity));
-    auto* old_ctrl = ctrl_;
-    auto* old_slots = slots_;
-    const size_t old_capacity = capacity_;
-    capacity_ = new_capacity;
+    auto* old_ctrl = control();
+    auto* old_slots = slot_array();
+    const size_t old_capacity = common().capacity();
+    common().set_capacity(new_capacity);
     initialize_slots();
 
+    auto* new_slots = slot_array();
     size_t total_probe_length = 0;
     for (size_t i = 0; i != old_capacity; ++i) {
       if (IsFull(old_ctrl[i])) {
         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                           PolicyTraits::element(old_slots + i));
-        auto target = find_first_non_full(hash);
+        auto target = find_first_non_full(common(), hash);
         size_t new_i = target.offset;
         total_probe_length += target.probe_length;
-        set_ctrl(new_i, H2(hash));
-        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+        SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+        PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
       }
     }
     if (old_capacity) {
       SanitizerUnpoisonMemoryRegion(old_slots,
                                     sizeof(slot_type) * old_capacity);
-      auto layout = MakeLayout(old_capacity);
-      Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
-                                      layout.AllocSize());
+      Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+          &alloc_ref(), old_ctrl - ControlOffset(),
+          AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
     }
-    infoz_.RecordRehash(total_probe_length);
+    infoz().RecordRehash(total_probe_length);
   }
 
-  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
-    assert(IsValidCapacity(capacity_));
-    assert(!is_small());
-    // Algorithm:
-    // - mark all DELETED slots as EMPTY
-    // - mark all FULL slots as DELETED
-    // - for each slot marked as DELETED
-    //     hash = Hash(element)
-    //     target = find_first_non_full(hash)
-    //     if target is in the same group
-    //       mark slot as FULL
-    //     else if target is EMPTY
-    //       transfer element to target
-    //       mark slot as EMPTY
-    //       mark target as FULL
-    //     else if target is DELETED
-    //       swap current element with target element
-    //       mark target as FULL
-    //       repeat procedure for current slot with moved from element (target)
-    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
-    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
-    size_t total_probe_length = 0;
-    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
-    for (size_t i = 0; i != capacity_; ++i) {
-      if (!IsDeleted(ctrl_[i])) continue;
-      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
-                                        PolicyTraits::element(slots_ + i));
-      auto target = find_first_non_full(hash);
-      size_t new_i = target.offset;
-      total_probe_length += target.probe_length;
-
-      // Verify if the old and new i fall within the same group wrt the hash.
-      // If they do, we don't need to move the object as it falls already in the
-      // best probe we can.
-      const auto probe_index = [&](size_t pos) {
-        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
-      };
-
-      // Element doesn't move.
-      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
-        set_ctrl(i, H2(hash));
-        continue;
-      }
-      if (IsEmpty(ctrl_[new_i])) {
-        // Transfer element to the empty spot.
-        // set_ctrl poisons/unpoisons the slots so we have to call it at the
-        // right time.
-        set_ctrl(new_i, H2(hash));
-        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
-        set_ctrl(i, kEmpty);
-      } else {
-        assert(IsDeleted(ctrl_[new_i]));
-        set_ctrl(new_i, H2(hash));
-        // Until we are done rehashing, DELETED marks previously FULL slots.
-        // Swap i and new_i elements.
-        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
-        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
-        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
-        --i;  // repeat
-      }
-    }
-    reset_growth_left();
-    infoz_.RecordRehash(total_probe_length);
+  // Prunes control bytes to remove as many tombstones as possible.
+  //
+  // See the comment on `rehash_and_grow_if_necessary()`.
+  inline void drop_deletes_without_resize() {
+    // Stack-allocate space for swapping elements.
+    alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
+    DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
   }
 
+  // Called whenever the table *might* need to conditionally grow.
+  //
+  // This function is an optimization opportunity to perform a rehash even when
+  // growth is unnecessary, because vacating tombstones is beneficial for
+  // performance in the long-run.
   void rehash_and_grow_if_necessary() {
-    if (capacity_ == 0) {
-      resize(1);
-    } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+    const size_t cap = capacity();
+    if (cap > Group::kWidth &&
+        // Do these calculations in 64-bit to avoid overflow.
+        size() * uint64_t{32} <= cap * uint64_t{25}) {
       // Squash DELETED without growing if there is enough capacity.
+      //
+      // Rehash in place if the current size is <= 25/32 of capacity.
+      // Rationale for such a high factor: 1) drop_deletes_without_resize() is
+      // faster than resize, and 2) it takes quite a bit of work to add
+      // tombstones.  In the worst case, it seems to take approximately 4
+      // insert/erase pairs to create a single tombstone, so if we are
+      // rehashing because of tombstones, we can afford to rehash in place as
+      // long as we are reclaiming at least 1/8 the capacity without doing more
+      // than 2X the work.  (Where "work" is defined to be size() for rehashing
+      // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
+      // place is faster per operation than inserting or even doubling the size
+      // of the table, so we can actually afford to reclaim even less space from
+      // a resize-in-place.  The decision is to rehash in place if we can reclaim
+      // about 1/8th of the usable capacity (specifically 3/28 of the
+      // capacity), which means that the total cost of rehashing will be a small
+      // fraction of the total work.
+      //
+      // Here is output of an experiment using the BM_CacheInSteadyState
+      // benchmark running the old case (where we rehash-in-place only if we can
+      // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
+      // if we can recover 3/32*capacity).
+      //
+      // Note that although the worst-case number of rehashes jumped up from
+      // 15 to 190, the number of operations per second is almost the same.
+      //
+      // Abridged output of running BM_CacheInSteadyState benchmark from
+      // raw_hash_set_benchmark.   N is the number of insert/erase operations.
+      //
+      //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
+      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
+      //  448 | 145284       0.44        18 | 140118       0.44        19
+      //  493 | 152546       0.24        11 | 151417       0.48        28
+      //  538 | 151439       0.26        11 | 151152       0.53        38
+      //  583 | 151765       0.28        11 | 150572       0.57        50
+      //  628 | 150241       0.31        11 | 150853       0.61        66
+      //  672 | 149602       0.33        12 | 150110       0.66        90
+      //  717 | 149998       0.35        12 | 149531       0.70       129
+      //  762 | 149836       0.37        13 | 148559       0.74       190
+      //  807 | 149736       0.39        14 | 151107       0.39        14
+      //  852 | 150204       0.42        15 | 151019       0.42        15
       drop_deletes_without_resize();
     } else {
       // Otherwise grow the container.
-      resize(capacity_ * 2 + 1);
+      resize(NextCapacity(cap));
     }
   }
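A worked instance of the 25/32 threshold above (illustrative arithmetic, not part of the patch): with capacity 1023, tombstones are squashed in place while size() is at most 799, since 799 * 32 = 25568 <= 1023 * 25 = 25575; at size() == 800 the comparison fails (25600 > 25575) and the table grows to the next capacity instead.

  // Sketch only: the same inequality as compile-time checks.
  static_assert(799 * 32 <= 1023 * 25, "size 799 at capacity 1023: drop deletes in place");
  static_assert(800 * 32 > 1023 * 25, "size 800 at capacity 1023: grow instead");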
 
   bool has_element(const value_type& elem) const {
     size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
-    auto seq = probe(hash);
+    auto seq = probe(common(), hash);
+    const ctrl_t* ctrl = control();
     while (true) {
-      Group g{ctrl_ + seq.offset()};
-      for (int i : g.Match(H2(hash))) {
-        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
-                              elem))
+      Group g{ctrl + seq.offset()};
+      for (uint32_t i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(
+                PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
           return true;
       }
-      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
       seq.next();
-      assert(seq.index() < capacity_ && "full table!");
+      assert(seq.index() <= capacity() && "full table!");
     }
     return false;
   }
 
-  // Probes the raw_hash_set with the probe sequence for hash and returns the
-  // pointer to the first empty or deleted slot.
-  // NOTE: this function must work with tables having both kEmpty and kDelete
-  // in one group. Such tables appears during drop_deletes_without_resize.
-  //
-  // This function is very useful when insertions happen and:
-  // - the input is already a set
-  // - there are enough slots
-  // - the element with the hash is not in the table
-  struct FindInfo {
-    size_t offset;
-    size_t probe_length;
-  };
-  FindInfo find_first_non_full(size_t hash) {
-    auto seq = probe(hash);
-    while (true) {
-      Group g{ctrl_ + seq.offset()};
-      auto mask = g.MatchEmptyOrDeleted();
-      if (mask) {
-#if !defined(NDEBUG)
-        // We want to add entropy even when ASLR is not enabled.
-        // In debug build we will randomly insert in either the front or back of
-        // the group.
-        // TODO(kfm,sbenza): revisit after we do unconditional mixing
-        if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
-          return {seq.offset(mask.HighestBitSet()), seq.index()};
-        }
-#endif
-        return {seq.offset(mask.LowestBitSet()), seq.index()};
-      }
-      seq.next();
-      assert(seq.index() < capacity_ && "full table!");
-    }
-  }
-
   // TODO(alkis): Optimize this assuming *this and that don't overlap.
   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
     raw_hash_set tmp(std::move(that));
@@ -1697,36 +2635,54 @@
   }
 
  protected:
+  // Attempts to find `key` in the table; if it isn't found, returns a slot that
+  // the value can be inserted into, with the control byte already set to
+  // `key`'s H2.
   template <class K>
   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+    prefetch_heap_block();
     auto hash = hash_ref()(key);
-    auto seq = probe(hash);
+    auto seq = probe(common(), hash);
+    const ctrl_t* ctrl = control();
     while (true) {
-      Group g{ctrl_ + seq.offset()};
-      for (int i : g.Match(H2(hash))) {
+      Group g{ctrl + seq.offset()};
+      for (uint32_t i : g.Match(H2(hash))) {
         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                 EqualElement<K>{key, eq_ref()},
-                PolicyTraits::element(slots_ + seq.offset(i)))))
+                PolicyTraits::element(slot_array() + seq.offset(i)))))
           return {seq.offset(i), false};
       }
-      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
       seq.next();
-      assert(seq.index() < capacity_ && "full table!");
+      assert(seq.index() <= capacity() && "full table!");
     }
     return {prepare_insert(hash), true};
   }
 
+  // Given the hash of a value not currently in the table, finds the next
+  // viable slot index to insert it at.
+  //
+  // REQUIRES: At least one non-full slot available.
   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
-    auto target = find_first_non_full(hash);
-    if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
-                           !IsDeleted(ctrl_[target.offset]))) {
-      rehash_and_grow_if_necessary();
-      target = find_first_non_full(hash);
+    const bool rehash_for_bug_detection =
+        common().should_rehash_for_bug_detection_on_insert();
+    if (rehash_for_bug_detection) {
+      // Move to a different heap allocation in order to detect bugs.
+      const size_t cap = capacity();
+      resize(growth_left() > 0 ? cap : NextCapacity(cap));
     }
-    ++size_;
-    growth_left() -= IsEmpty(ctrl_[target.offset]);
-    set_ctrl(target.offset, H2(hash));
-    infoz_.RecordInsert(hash, target.probe_length);
+    auto target = find_first_non_full(common(), hash);
+    if (!rehash_for_bug_detection &&
+        ABSL_PREDICT_FALSE(growth_left() == 0 &&
+                           !IsDeleted(control()[target.offset]))) {
+      rehash_and_grow_if_necessary();
+      target = find_first_non_full(common(), hash);
+    }
+    common().set_size(common().size() + 1);
+    set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
+    SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
+    common().maybe_increment_generation_on_insert();
+    infoz().RecordInsert(hash, target.probe_length);
     return target.offset;
   }
 
@@ -1740,7 +2696,7 @@
   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
   template <class... Args>
   void emplace_at(size_t i, Args&&... args) {
-    PolicyTraits::construct(&alloc_ref(), slots_ + i,
+    PolicyTraits::construct(&alloc_ref(), slot_array() + i,
                             std::forward<Args>(args)...);
 
     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
@@ -1748,60 +2704,46 @@
            "constructed value does not match the lookup key");
   }
 
-  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
-  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return {control() + i, slot_array() + i, common().generation_ptr()};
+  }
+  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return {control() + i, slot_array() + i, common().generation_ptr()};
+  }
 
  private:
   friend struct RawHashSetTestOnlyAccess;
 
-  probe_seq<Group::kWidth> probe(size_t hash) const {
-    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
-  }
-
-  // Reset all ctrl bytes back to kEmpty, except the sentinel.
-  void reset_ctrl() {
-    std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
-    ctrl_[capacity_] = kSentinel;
-    SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
-  }
-
-  void reset_growth_left() {
-    growth_left() = CapacityToGrowth(capacity()) - size_;
-  }
-
-  // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
-  // the end too.
-  void set_ctrl(size_t i, ctrl_t h) {
-    assert(i < capacity_);
-
-    if (IsFull(h)) {
-      SanitizerUnpoisonObject(slots_ + i);
-    } else {
-      SanitizerPoisonObject(slots_ + i);
-    }
-
-    ctrl_[i] = h;
-    ctrl_[((i - Group::kWidth) & capacity_) + 1 +
-          ((Group::kWidth - 1) & capacity_)] = h;
-  }
-
-  size_t& growth_left() { return settings_.template get<0>(); }
-
-  // The representation of the object has two modes:
-  //  - small: For capacities < kWidth-1
-  //  - large: For the rest.
+  // The number of slots we can still fill without needing to rehash.
   //
-  // Differences:
-  //  - In small mode we are able to use the whole capacity. The extra control
-  //  bytes give us at least one "empty" control byte to stop the iteration.
-  //  This is important to make 1 a valid capacity.
+  // This is stored separately due to tombstones: we do not include tombstones
+  // in the growth capacity, because we'd like to rehash when the table is
+  // otherwise filled with tombstones; without this, probe sequences might get
+  // unacceptably long without triggering a rehash. Callers can also force a
+  // rehash via the standard `rehash(0)`, which will recompute this value as a
+  // side-effect.
   //
-  //  - In small mode only the first `capacity()` control bytes after the
-  //  sentinel are valid. The rest contain dummy kEmpty values that do not
-  //  represent a real slot. This is important to take into account on
-  //  find_first_non_full(), where we never try ShouldInsertBackwards() for
-  //  small tables.
-  bool is_small() const { return capacity_ < Group::kWidth - 1; }
+  // See `CapacityToGrowth()`.
+  size_t growth_left() const { return common().growth_left(); }
+  void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
+
+  // Prefetch the heap-allocated memory region to resolve potential TLB and
+  // cache misses. This is intended to overlap with execution of calculating the
+  // hash for a key.
+  void prefetch_heap_block() const {
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+    __builtin_prefetch(control(), 0, 1);
+#endif
+  }
+
+  CommonFields& common() { return settings_.template get<0>(); }
+  const CommonFields& common() const { return settings_.template get<0>(); }
+
+  ctrl_t* control() const { return common().control(); }
+  slot_type* slot_array() const {
+    return static_cast<slot_type*>(common().slot_array());
+  }
+  HashtablezInfoHandle& infoz() { return common().infoz(); }
 
   hasher& hash_ref() { return settings_.template get<1>(); }
   const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -1812,28 +2754,66 @@
     return settings_.template get<3>();
   }
 
-  // TODO(alkis): Investigate removing some of these fields:
-  // - ctrl/slots can be derived from each other
-  // - size can be moved into the slot array
-  ctrl_t* ctrl_ = EmptyGroup();    // [(capacity + 1) * ctrl_t]
-  slot_type* slots_ = nullptr;     // [capacity * slot_type]
-  size_t size_ = 0;                // number of full slots
-  size_t capacity_ = 0;            // total number of slots
-  HashtablezInfoHandle infoz_;
-  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
-                                            key_equal, allocator_type>
-      settings_{0, hasher{}, key_equal{}, allocator_type{}};
+  // Make type-specific functions for this type's PolicyFunctions struct.
+  static size_t hash_slot_fn(void* set, void* slot) {
+    auto* h = static_cast<raw_hash_set*>(set);
+    return PolicyTraits::apply(
+        HashElement{h->hash_ref()},
+        PolicyTraits::element(static_cast<slot_type*>(slot)));
+  }
+  static void transfer_slot_fn(void* set, void* dst, void* src) {
+    auto* h = static_cast<raw_hash_set*>(set);
+    PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
+                           static_cast<slot_type*>(src));
+  }
+  // Note: dealloc_fn will only be used if we have a non-standard allocator.
+  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
+    auto* set = reinterpret_cast<raw_hash_set*>(&common);
+
+    // Unpoison before returning the memory to the allocator.
+    SanitizerUnpoisonMemoryRegion(common.slot_array(),
+                                  sizeof(slot_type) * common.capacity());
+
+    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+        &set->alloc_ref(), common.backing_array_start(),
+        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
+  }
+
+  static const PolicyFunctions& GetPolicyFunctions() {
+    static constexpr PolicyFunctions value = {
+        sizeof(slot_type),
+        &raw_hash_set::hash_slot_fn,
+        PolicyTraits::transfer_uses_memcpy()
+            ? TransferRelocatable<sizeof(slot_type)>
+            : &raw_hash_set::transfer_slot_fn,
+        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
+             ? &DeallocateStandard<alignof(slot_type)>
+             : &raw_hash_set::dealloc_fn),
+    };
+    return value;
+  }
+
+  // Bundle together CommonFields plus other objects which might be empty.
+  // CompressedTuple will ensure that sizeof is not affected by any of the empty
+  // fields that occur after CommonFields.
+  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
+                                            allocator_type>
+      settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
 };
 
 // Erases all elements that satisfy the predicate `pred` from the container `c`.
 template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+typename raw_hash_set<P, H, E, A>::size_type EraseIf(
+    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+  const auto initial_size = c->size();
   for (auto it = c->begin(), last = c->end(); it != last;) {
-    auto copy_it = it++;
-    if (pred(*copy_it)) {
-      c->erase(copy_it);
+    if (pred(*it)) {
+      c->erase(it++);
+    } else {
+      ++it;
     }
   }
+  return initial_size - c->size();
 }
 
 namespace hashtable_debug_internal {
@@ -1846,36 +2826,37 @@
                              const typename Set::key_type& key) {
     size_t num_probes = 0;
     size_t hash = set.hash_ref()(key);
-    auto seq = set.probe(hash);
+    auto seq = probe(set.common(), hash);
+    const ctrl_t* ctrl = set.control();
     while (true) {
-      container_internal::Group g{set.ctrl_ + seq.offset()};
-      for (int i : g.Match(container_internal::H2(hash))) {
+      container_internal::Group g{ctrl + seq.offset()};
+      for (uint32_t i : g.Match(container_internal::H2(hash))) {
         if (Traits::apply(
                 typename Set::template EqualElement<typename Set::key_type>{
                     key, set.eq_ref()},
-                Traits::element(set.slots_ + seq.offset(i))))
+                Traits::element(set.slot_array() + seq.offset(i))))
           return num_probes;
         ++num_probes;
       }
-      if (g.MatchEmpty()) return num_probes;
+      if (g.MaskEmpty()) return num_probes;
       seq.next();
       ++num_probes;
     }
   }
 
   static size_t AllocatedByteSize(const Set& c) {
-    size_t capacity = c.capacity_;
+    size_t capacity = c.capacity();
     if (capacity == 0) return 0;
-    auto layout = Set::MakeLayout(capacity);
-    size_t m = layout.AllocSize();
+    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
 
     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
     if (per_slot != ~size_t{}) {
       m += per_slot * c.size();
     } else {
+      const ctrl_t* ctrl = c.control();
       for (size_t i = 0; i != capacity; ++i) {
-        if (container_internal::IsFull(c.ctrl_[i])) {
-          m += Traits::space_used(c.slots_ + i);
+        if (container_internal::IsFull(ctrl[i])) {
+          m += Traits::space_used(c.slot_array() + i);
         }
       }
     }
@@ -1885,8 +2866,8 @@
   static size_t LowerBoundAllocatedByteSize(size_t size) {
     size_t capacity = GrowthToLowerboundCapacity(size);
     if (capacity == 0) return 0;
-    auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
-    size_t m = layout.AllocSize();
+    size_t m =
+        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
     if (per_slot != ~size_t{}) {
       m += per_slot * size;
@@ -1900,4 +2881,6 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
+#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+
 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc b/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
index 1a03608..e73f53f 100644
--- a/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -466,6 +466,9 @@
   size_t id_ = std::numeric_limits<size_t>::max();
 };
 
+// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
+#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
+    __GNUC_MINOR__ != 5)
 TEST(NoPropagateOn, Swap) {
   using PA = PAlloc<char>;
   using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
@@ -475,6 +478,7 @@
   EXPECT_EQ(t1.get_allocator(), PA(1));
   EXPECT_EQ(t2.get_allocator(), PA(2));
 }
+#endif
 
 TEST(NoPropagateOn, CopyConstruct) {
   using PA = PAlloc<char>;
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc b/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc
new file mode 100644
index 0000000..a364789
--- /dev/null
+++ b/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc
@@ -0,0 +1,544 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <random>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/strings/str_format.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+  template <typename C>
+  static auto GetSlots(const C& c) -> decltype(c.slots_) {
+    return c.slots_;
+  }
+};
+
+namespace {
+
+struct IntPolicy {
+  using slot_type = int64_t;
+  using key_type = int64_t;
+  using init_type = int64_t;
+
+  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+  static void destroy(void*, int64_t*) {}
+  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+    *new_slot = *old_slot;
+  }
+
+  static int64_t& element(slot_type* slot) { return *slot; }
+
+  template <class F>
+  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+    return std::forward<F>(f)(x, x);
+  }
+};
+
+class StringPolicy {
+  template <class F, class K, class V,
+            class = typename std::enable_if<
+                std::is_convertible<const K&, absl::string_view>::value>::type>
+  decltype(std::declval<F>()(
+      std::declval<const absl::string_view&>(), std::piecewise_construct,
+      std::declval<std::tuple<K>>(),
+      std::declval<V>())) static apply_impl(F&& f,
+                                            std::pair<std::tuple<K>, V> p) {
+    const absl::string_view& key = std::get<0>(p.first);
+    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+                              std::move(p.second));
+  }
+
+ public:
+  struct slot_type {
+    struct ctor {};
+
+    template <class... Ts>
+    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+    std::pair<std::string, std::string> pair;
+  };
+
+  using key_type = std::string;
+  using init_type = std::pair<std::string, std::string>;
+
+  template <class allocator_type, class... Args>
+  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+    std::allocator_traits<allocator_type>::construct(
+        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+  }
+
+  template <class allocator_type>
+  static void destroy(allocator_type* alloc, slot_type* slot) {
+    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+  }
+
+  template <class allocator_type>
+  static void transfer(allocator_type* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(old_slot->pair));
+    destroy(alloc, old_slot);
+  }
+
+  static std::pair<std::string, std::string>& element(slot_type* slot) {
+    return slot->pair;
+  }
+
+  template <class F, class... Args>
+  static auto apply(F&& f, Args&&... args)
+      -> decltype(apply_impl(std::forward<F>(f),
+                             PairArgs(std::forward<Args>(args)...))) {
+    return apply_impl(std::forward<F>(f),
+                      PairArgs(std::forward<Args>(args)...));
+  }
+};
+
+struct StringHash : container_internal::hash_default_hash<absl::string_view> {
+  using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+  using is_transparent = void;
+};
+
+struct StringTable
+    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+  using Base = typename StringTable::raw_hash_set;
+  StringTable() {}
+  using Base::Base;
+};
+
+struct IntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, std::allocator<int64_t>> {
+  using Base = typename IntTable::raw_hash_set;
+  IntTable() {}
+  using Base::Base;
+};
+
+struct string_generator {
+  template <class RNG>
+  std::string operator()(RNG& rng) const {
+    std::string res;
+    res.resize(size);
+    std::uniform_int_distribution<uint32_t> printable_ascii(0x20, 0x7E);
+    std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); });
+    return res;
+  }
+
+  size_t size;
+};
+
+// Model a cache in steady state.
+//
+// On a table of size N, keep deleting the LRU entry and adding a random one.
+void BM_CacheInSteadyState(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  string_generator gen{12};
+  StringTable t;
+  std::deque<std::string> keys;
+  while (t.size() < state.range(0)) {
+    auto x = t.emplace(gen(rng), gen(rng));
+    if (x.second) keys.push_back(x.first->first);
+  }
+  ABSL_RAW_CHECK(state.range(0) >= 10, "");
+  while (state.KeepRunning()) {
+    // Some cache hits.
+    std::deque<std::string>::const_iterator it;
+    for (int i = 0; i != 90; ++i) {
+      if (i % 10 == 0) it = keys.end();
+      ::benchmark::DoNotOptimize(t.find(*--it));
+    }
+    // Some cache misses.
+    for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng)));
+    ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str());
+    keys.pop_front();
+    while (true) {
+      auto x = t.emplace(gen(rng), gen(rng));
+      if (x.second) {
+        keys.push_back(x.first->first);
+        break;
+      }
+    }
+  }
+  state.SetItemsProcessed(state.iterations());
+  state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor()));
+}
+
+template <typename Benchmark>
+void CacheInSteadyStateArgs(Benchmark* bm) {
+  // The default.
+  const float max_load_factor = 0.875;
+  // When the cache is at the steady state, the probe sequence will equal
+  // capacity if there is no reclamation of deleted slots. Pick a number large
+  // enough to make the benchmark slow for that case.
+  const size_t capacity = 1 << 10;
+
+  // Check N data points to cover load factors in [0.4, 0.8).
+  const size_t kNumPoints = 10;
+  for (size_t i = 0; i != kNumPoints; ++i)
+    bm->Arg(std::ceil(
+        capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2));
+}
+BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs);
+
+void BM_EndComparison(benchmark::State& state) {
+  StringTable t = {{"a", "a"}, {"b", "b"}};
+  auto it = t.begin();
+  for (auto i : state) {
+    benchmark::DoNotOptimize(t);
+    benchmark::DoNotOptimize(it);
+    benchmark::DoNotOptimize(it != t.end());
+  }
+}
+BENCHMARK(BM_EndComparison);
+
+void BM_Iteration(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  string_generator gen{12};
+  StringTable t;
+
+  size_t capacity = state.range(0);
+  size_t size = state.range(1);
+  t.reserve(capacity);
+
+  while (t.size() < size) {
+    t.emplace(gen(rng), gen(rng));
+  }
+
+  for (auto i : state) {
+    benchmark::DoNotOptimize(t);
+    for (auto it = t.begin(); it != t.end(); ++it) {
+      benchmark::DoNotOptimize(*it);
+    }
+  }
+}
+
+BENCHMARK(BM_Iteration)
+    ->ArgPair(1, 1)
+    ->ArgPair(2, 2)
+    ->ArgPair(4, 4)
+    ->ArgPair(7, 7)
+    ->ArgPair(10, 10)
+    ->ArgPair(15, 15)
+    ->ArgPair(16, 16)
+    ->ArgPair(54, 54)
+    ->ArgPair(100, 100)
+    ->ArgPair(400, 400)
+    // empty
+    ->ArgPair(0, 0)
+    ->ArgPair(10, 0)
+    ->ArgPair(100, 0)
+    ->ArgPair(1000, 0)
+    ->ArgPair(10000, 0)
+    // sparse
+    ->ArgPair(100, 1)
+    ->ArgPair(1000, 10);
+
+void BM_CopyCtorSparseInt(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  IntTable t;
+  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+
+  size_t size = state.range(0);
+  t.reserve(size * 10);
+  while (t.size() < size) {
+    t.emplace(dist(rng));
+  }
+
+  for (auto i : state) {
+    IntTable t2 = t;
+    benchmark::DoNotOptimize(t2);
+  }
+}
+BENCHMARK(BM_CopyCtorSparseInt)->Range(128, 4096);
+
+void BM_CopyCtorInt(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  IntTable t;
+  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+
+  size_t size = state.range(0);
+  while (t.size() < size) {
+    t.emplace(dist(rng));
+  }
+
+  for (auto i : state) {
+    IntTable t2 = t;
+    benchmark::DoNotOptimize(t2);
+  }
+}
+BENCHMARK(BM_CopyCtorInt)->Range(128, 4096);
+
+void BM_CopyCtorString(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  StringTable t;
+  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+
+  size_t size = state.range(0);
+  while (t.size() < size) {
+    t.emplace(std::to_string(dist(rng)), std::to_string(dist(rng)));
+  }
+
+  for (auto i : state) {
+    StringTable t2 = t;
+    benchmark::DoNotOptimize(t2);
+  }
+}
+BENCHMARK(BM_CopyCtorString)->Range(128, 4096);
+
+void BM_CopyAssign(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  IntTable t;
+  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+  while (t.size() < state.range(0)) {
+    t.emplace(dist(rng));
+  }
+
+  IntTable t2;
+  for (auto _ : state) {
+    t2 = t;
+    benchmark::DoNotOptimize(t2);
+  }
+}
+BENCHMARK(BM_CopyAssign)->Range(128, 4096);
+
+void BM_RangeCtor(benchmark::State& state) {
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+  std::vector<int> values;
+  const size_t desired_size = state.range(0);
+  while (values.size() < desired_size) {
+    values.emplace_back(dist(rng));
+  }
+
+  for (auto unused : state) {
+    IntTable t{values.begin(), values.end()};
+    benchmark::DoNotOptimize(t);
+  }
+}
+BENCHMARK(BM_RangeCtor)->Range(128, 65536);
+
+void BM_NoOpReserveIntTable(benchmark::State& state) {
+  IntTable t;
+  t.reserve(100000);
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(t);
+    t.reserve(100000);
+  }
+}
+BENCHMARK(BM_NoOpReserveIntTable);
+
+void BM_NoOpReserveStringTable(benchmark::State& state) {
+  StringTable t;
+  t.reserve(100000);
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(t);
+    t.reserve(100000);
+  }
+}
+BENCHMARK(BM_NoOpReserveStringTable);
+
+void BM_ReserveIntTable(benchmark::State& state) {
+  int reserve_size = state.range(0);
+  for (auto _ : state) {
+    state.PauseTiming();
+    IntTable t;
+    state.ResumeTiming();
+    benchmark::DoNotOptimize(t);
+    t.reserve(reserve_size);
+  }
+}
+BENCHMARK(BM_ReserveIntTable)->Range(128, 4096);
+
+void BM_ReserveStringTable(benchmark::State& state) {
+  int reserve_size = state.range(0);
+  for (auto _ : state) {
+    state.PauseTiming();
+    StringTable t;
+    state.ResumeTiming();
+    benchmark::DoNotOptimize(t);
+    t.reserve(reserve_size);
+  }
+}
+BENCHMARK(BM_ReserveStringTable)->Range(128, 4096);
+
+// Like std::iota, except that ctrl_t doesn't support operator++.
+template <typename CtrlIter>
+void Iota(CtrlIter begin, CtrlIter end, int value) {
+  for (; begin != end; ++begin, ++value) {
+    *begin = static_cast<ctrl_t>(value);
+  }
+}
+
+void BM_Group_Match(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -4);
+  Group g{group.data()};
+  h2_t h = 1;
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(h);
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.Match(h));
+  }
+}
+BENCHMARK(BM_Group_Match);
+
+void BM_Group_MaskEmpty(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -4);
+  Group g{group.data()};
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.MaskEmpty());
+  }
+}
+BENCHMARK(BM_Group_MaskEmpty);
+
+void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -4);
+  Group g{group.data()};
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted());
+  }
+}
+BENCHMARK(BM_Group_MaskEmptyOrDeleted);
+
+void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -2);
+  Group g{group.data()};
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted());
+  }
+}
+BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted);
+
+void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -2);
+  Group g{group.data()};
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet());
+  }
+}
+BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
+
+void BM_DropDeletes(benchmark::State& state) {
+  constexpr size_t capacity = (1 << 20) - 1;
+  std::vector<ctrl_t> ctrl(capacity + 1 + Group::kWidth);
+  ctrl[capacity] = ctrl_t::kSentinel;
+  std::vector<ctrl_t> pattern = {ctrl_t::kEmpty,   static_cast<ctrl_t>(2),
+                                 ctrl_t::kDeleted, static_cast<ctrl_t>(2),
+                                 ctrl_t::kEmpty,   static_cast<ctrl_t>(1),
+                                 ctrl_t::kDeleted};
+  for (size_t i = 0; i != capacity; ++i) {
+    ctrl[i] = pattern[i % pattern.size()];
+  }
+  while (state.KeepRunning()) {
+    state.PauseTiming();
+    std::vector<ctrl_t> ctrl_copy = ctrl;
+    state.ResumeTiming();
+    ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity);
+    ::benchmark::DoNotOptimize(ctrl_copy[capacity]);
+  }
+}
+BENCHMARK(BM_DropDeletes);
+
+void BM_Resize(benchmark::State& state) {
+  // For now just measure a small cheap hash table since we
+  // are mostly interested in the overhead of type-erasure
+  // in resize().
+  constexpr int kElements = 64;
+  const int kCapacity = kElements * 2;
+
+  IntTable table;
+  for (int i = 0; i < kElements; i++) {
+    table.insert(i);
+  }
+  for (auto unused : state) {
+    table.rehash(0);
+    table.rehash(kCapacity);
+  }
+}
+BENCHMARK(BM_Resize);
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// These methods are here to make it easy to examine the assembly for targeted
+// parts of the API.
+auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table,
+                                    int64_t key) -> decltype(table->find(key)) {
+  return table->find(key);
+}
+
+bool CodegenAbslRawHashSetInt64FindNeEnd(
+    absl::container_internal::IntTable* table, int64_t key) {
+  return table->find(key) != table->end();
+}
+
+// This is useful because the find isn't inlined but the iterator comparison is.
+bool CodegenAbslRawHashSetStringFindNeEnd(
+    absl::container_internal::StringTable* table, const std::string& key) {
+  return table->find(key) != table->end();
+}
+
+auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table,
+                                      int64_t key)
+    -> decltype(table->insert(key)) {
+  return table->insert(key);
+}
+
+bool CodegenAbslRawHashSetInt64Contains(
+    absl::container_internal::IntTable* table, int64_t key) {
+  return table->contains(key);
+}
+
+void CodegenAbslRawHashSetInt64Iterate(
+    absl::container_internal::IntTable* table) {
+  for (auto x : *table) benchmark::DoNotOptimize(x);
+}
+
+int odr =
+    (::benchmark::DoNotOptimize(std::make_tuple(
+         &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd,
+         &CodegenAbslRawHashSetStringFindNeEnd,
+         &CodegenAbslRawHashSetInt64Insert, &CodegenAbslRawHashSetInt64Contains,
+         &CodegenAbslRawHashSetInt64Iterate)),
+     1);
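BM_CacheInSteadyState above registers its argument list through Benchmark::Apply instead of hard-coded ->Arg calls. A minimal, self-contained sketch of that registration pattern using the standard Google Benchmark API; the names BM_Example and ExampleArgs are hypothetical and not part of this change:

#include "benchmark/benchmark.h"

static void BM_Example(benchmark::State& state) {
  for (auto _ : state) {
    // The argument registered below is available as state.range(0).
    benchmark::DoNotOptimize(state.range(0));
  }
}

// Computes the argument list at registration time rather than hard-coding it,
// mirroring CacheInSteadyStateArgs above.
template <typename B>
static void ExampleArgs(B* bm) {
  for (int i = 1; i <= 4; ++i) bm->Arg(64 << i);  // 128, 256, 512, 1024
}
BENCHMARK(BM_Example)->Apply(ExampleArgs);

BENCHMARK_MAIN();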
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc b/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc
new file mode 100644
index 0000000..7169a2e
--- /dev/null
+++ b/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc
@@ -0,0 +1,590 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates probe length statistics for many combinations of key types and key
+// distributions, all using the default hash function for swisstable.
+
+#include <memory>
+#include <regex>  // NOLINT
+#include <vector>
+
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/random/distributions.h"
+#include "absl/random/random.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/strip.h"
+
+namespace {
+
+enum class OutputStyle { kRegular, kBenchmark };
+
+// The --benchmark command line flag.
+// This is populated from main().
+// When run in "benchmark" mode, we have different output. This allows
+// A/B comparisons with tools like `benchy`.
+absl::string_view benchmarks;
+
+OutputStyle output() {
+  return !benchmarks.empty() ? OutputStyle::kBenchmark : OutputStyle::kRegular;
+}
+
+template <class T>
+struct Policy {
+  using slot_type = T;
+  using key_type = T;
+  using init_type = T;
+
+  template <class allocator_type, class Arg>
+  static void construct(allocator_type* alloc, slot_type* slot,
+                        const Arg& arg) {
+    std::allocator_traits<allocator_type>::construct(*alloc, slot, arg);
+  }
+
+  template <class allocator_type>
+  static void destroy(allocator_type* alloc, slot_type* slot) {
+    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+  }
+
+  static slot_type& element(slot_type* slot) { return *slot; }
+
+  template <class F, class... Args>
+  static auto apply(F&& f, const slot_type& arg)
+      -> decltype(std::forward<F>(f)(arg, arg)) {
+    return std::forward<F>(f)(arg, arg);
+  }
+};
+
+absl::BitGen& GlobalBitGen() {
+  static auto* value = new absl::BitGen;
+  return *value;
+}
+
+// Keeps a pool of allocations and randomly gives one out.
+// This introduces more randomization to the addresses given to swisstable and
+// should help smooth out this factor from probe length calculation.
+template <class T>
+class RandomizedAllocator {
+ public:
+  using value_type = T;
+
+  RandomizedAllocator() = default;
+  template <typename U>
+  RandomizedAllocator(RandomizedAllocator<U>) {}  // NOLINT
+
+  static T* allocate(size_t n) {
+    auto& pointers = GetPointers(n);
+    // Fill the pool
+    while (pointers.size() < kRandomPool) {
+      pointers.push_back(std::allocator<T>{}.allocate(n));
+    }
+
+    // Choose a random one.
+    size_t i = absl::Uniform<size_t>(GlobalBitGen(), 0, pointers.size());
+    T* result = pointers[i];
+    pointers[i] = pointers.back();
+    pointers.pop_back();
+    return result;
+  }
+
+  static void deallocate(T* p, size_t n) {
+    // Just put it back on the pool. No need to release the memory.
+    GetPointers(n).push_back(p);
+  }
+
+ private:
+  // We keep at least kRandomPool allocations for each size.
+  static constexpr size_t kRandomPool = 20;
+
+  static std::vector<T*>& GetPointers(size_t n) {
+    static auto* m = new absl::flat_hash_map<size_t, std::vector<T*>>();
+    return (*m)[n];
+  }
+};
+
+template <class T>
+struct DefaultHash {
+  using type = absl::container_internal::hash_default_hash<T>;
+};
+
+template <class T>
+using DefaultHashT = typename DefaultHash<T>::type;
+
+template <class T>
+struct Table : absl::container_internal::raw_hash_set<
+                   Policy<T>, DefaultHashT<T>,
+                   absl::container_internal::hash_default_eq<T>,
+                   RandomizedAllocator<T>> {};
+
+struct LoadSizes {
+  size_t min_load;
+  size_t max_load;
+};
+
+LoadSizes GetMinMaxLoadSizes() {
+  static const auto sizes = [] {
+    Table<int> t;
+
+    // First, fill enough to have a good distribution.
+    constexpr size_t kMinSize = 10000;
+    while (t.size() < kMinSize) t.insert(t.size());
+
+    const auto reach_min_load_factor = [&] {
+      const double lf = t.load_factor();
+      while (lf <= t.load_factor()) t.insert(t.size());
+    };
+
+    // Then, insert until we reach min load factor.
+    reach_min_load_factor();
+    const size_t min_load_size = t.size();
+
+    // Keep going until we hit min load factor again, then go back one.
+    t.insert(t.size());
+    reach_min_load_factor();
+
+    return LoadSizes{min_load_size, t.size() - 1};
+  }();
+  return sizes;
+}
+
+struct Ratios {
+  double min_load;
+  double avg_load;
+  double max_load;
+};
+
+// See absl/container/internal/hashtable_debug.h for details on
+// probe length calculation.
+template <class ElemFn>
+Ratios CollectMeanProbeLengths() {
+  const auto min_max_sizes = GetMinMaxLoadSizes();
+
+  ElemFn elem;
+  using Key = decltype(elem());
+  Table<Key> t;
+
+  Ratios result;
+  while (t.size() < min_max_sizes.min_load) t.insert(elem());
+  result.min_load =
+      absl::container_internal::GetHashtableDebugProbeSummary(t).mean;
+
+  while (t.size() < (min_max_sizes.min_load + min_max_sizes.max_load) / 2)
+    t.insert(elem());
+  result.avg_load =
+      absl::container_internal::GetHashtableDebugProbeSummary(t).mean;
+
+  while (t.size() < min_max_sizes.max_load) t.insert(elem());
+  result.max_load =
+      absl::container_internal::GetHashtableDebugProbeSummary(t).mean;
+
+  return result;
+}
+
+template <int Align>
+uintptr_t PointerForAlignment() {
+  alignas(Align) static constexpr uintptr_t kInitPointer = 0;
+  return reinterpret_cast<uintptr_t>(&kInitPointer);
+}
+
+// This incomplete type is used for testing hash of pointers of different
+// alignments.
+// NOTE: We are generating invalid pointer values on the fly with
+// reinterpret_cast. These are not "safely derived" pointers, so using them is
+// technically UB. It is unlikely to be a problem, though.
+template <int Align>
+struct Ptr;
+
+template <int Align>
+Ptr<Align>* MakePtr(uintptr_t v) {
+  if (sizeof(v) == 8) {
+    constexpr int kCopyBits = 16;
+    // Ensure high bits are all the same.
+    v = static_cast<uintptr_t>(static_cast<intptr_t>(v << kCopyBits) >>
+                               kCopyBits);
+  }
+  return reinterpret_cast<Ptr<Align>*>(v);
+}
+
+struct IntIdentity {
+  uint64_t i;
+  friend bool operator==(IntIdentity a, IntIdentity b) { return a.i == b.i; }
+  IntIdentity operator++(int) { return IntIdentity{i++}; }
+};
+
+template <int Align>
+struct PtrIdentity {
+  explicit PtrIdentity(uintptr_t val = PointerForAlignment<Align>()) : i(val) {}
+  uintptr_t i;
+  friend bool operator==(PtrIdentity a, PtrIdentity b) { return a.i == b.i; }
+  PtrIdentity operator++(int) {
+    PtrIdentity p(i);
+    i += Align;
+    return p;
+  }
+};
+
+constexpr char kStringFormat[] = "/path/to/file/name-%07d-of-9999999.txt";
+
+template <bool small>
+struct String {
+  std::string value;
+  static std::string Make(uint32_t v) {
+    return {small ? absl::StrCat(v) : absl::StrFormat(kStringFormat, v)};
+  }
+};
+
+template <>
+struct DefaultHash<IntIdentity> {
+  struct type {
+    size_t operator()(IntIdentity t) const { return t.i; }
+  };
+};
+
+template <int Align>
+struct DefaultHash<PtrIdentity<Align>> {
+  struct type {
+    size_t operator()(PtrIdentity<Align> t) const { return t.i; }
+  };
+};
+
+template <class T>
+struct Sequential {
+  T operator()() const { return current++; }
+  mutable T current{};
+};
+
+template <int Align>
+struct Sequential<Ptr<Align>*> {
+  Ptr<Align>* operator()() const {
+    auto* result = MakePtr<Align>(current);
+    current += Align;
+    return result;
+  }
+  mutable uintptr_t current = PointerForAlignment<Align>();
+};
+
+
+template <bool small>
+struct Sequential<String<small>> {
+  std::string operator()() const { return String<small>::Make(current++); }
+  mutable uint32_t current = 0;
+};
+
+template <class T, class U>
+struct Sequential<std::pair<T, U>> {
+  mutable Sequential<T> tseq;
+  mutable Sequential<U> useq;
+
+  using RealT = decltype(tseq());
+  using RealU = decltype(useq());
+
+  mutable std::vector<RealT> ts;
+  mutable std::vector<RealU> us;
+  mutable size_t ti = 0, ui = 0;
+
+  std::pair<RealT, RealU> operator()() const {
+    std::pair<RealT, RealU> value{get_t(), get_u()};
+    if (ti == 0) {
+      ti = ui + 1;
+      ui = 0;
+    } else {
+      --ti;
+      ++ui;
+    }
+    return value;
+  }
+
+  RealT get_t() const {
+    while (ti >= ts.size()) ts.push_back(tseq());
+    return ts[ti];
+  }
+
+  RealU get_u() const {
+    while (ui >= us.size()) us.push_back(useq());
+    return us[ui];
+  }
+};
+
+template <class T, int percent_skip>
+struct AlmostSequential {
+  mutable Sequential<T> current;
+
+  auto operator()() const -> decltype(current()) {
+    while (absl::Uniform(GlobalBitGen(), 0.0, 1.0) <= percent_skip / 100.)
+      current();
+    return current();
+  }
+};
+
+struct Uniform {
+  template <typename T>
+  T operator()(T) const {
+    return absl::Uniform<T>(absl::IntervalClosed, GlobalBitGen(), T{0}, ~T{0});
+  }
+};
+
+struct Gaussian {
+  template <typename T>
+  T operator()(T) const {
+    double d;
+    do {
+      d = absl::Gaussian<double>(GlobalBitGen(), 1e6, 1e4);
+    } while (d <= 0 || d > std::numeric_limits<T>::max() / 2);
+    return static_cast<T>(d);
+  }
+};
+
+struct Zipf {
+  template <typename T>
+  T operator()(T) const {
+    return absl::Zipf<T>(GlobalBitGen(), std::numeric_limits<T>::max(), 1.6);
+  }
+};
+
+template <class T, class Dist>
+struct Random {
+  T operator()() const { return Dist{}(T{}); }
+};
+
+template <class Dist, int Align>
+struct Random<Ptr<Align>*, Dist> {
+  Ptr<Align>* operator()() const {
+    return MakePtr<Align>(Random<uintptr_t, Dist>{}() * Align);
+  }
+};
+
+template <class Dist>
+struct Random<IntIdentity, Dist> {
+  IntIdentity operator()() const {
+    return IntIdentity{Random<uint64_t, Dist>{}()};
+  }
+};
+
+template <class Dist, int Align>
+struct Random<PtrIdentity<Align>, Dist> {
+  PtrIdentity<Align> operator()() const {
+    return PtrIdentity<Align>{Random<uintptr_t, Dist>{}() * Align};
+  }
+};
+
+template <class Dist, bool small>
+struct Random<String<small>, Dist> {
+  std::string operator()() const {
+    return String<small>::Make(Random<uint32_t, Dist>{}());
+  }
+};
+
+template <class T, class U, class Dist>
+struct Random<std::pair<T, U>, Dist> {
+  auto operator()() const
+      -> decltype(std::make_pair(Random<T, Dist>{}(), Random<U, Dist>{}())) {
+    return std::make_pair(Random<T, Dist>{}(), Random<U, Dist>{}());
+  }
+};
+
+template <typename>
+std::string Name();
+
+std::string Name(uint32_t*) { return "u32"; }
+std::string Name(uint64_t*) { return "u64"; }
+std::string Name(IntIdentity*) { return "IntIdentity"; }
+
+template <int Align>
+std::string Name(Ptr<Align>**) {
+  return absl::StrCat("Ptr", Align);
+}
+
+template <int Align>
+std::string Name(PtrIdentity<Align>*) {
+  return absl::StrCat("PtrIdentity", Align);
+}
+
+template <bool small>
+std::string Name(String<small>*) {
+  return small ? "StrS" : "StrL";
+}
+
+template <class T, class U>
+std::string Name(std::pair<T, U>*) {
+  if (output() == OutputStyle::kBenchmark)
+    return absl::StrCat("P_", Name<T>(), "_", Name<U>());
+  return absl::StrCat("P<", Name<T>(), ",", Name<U>(), ">");
+}
+
+template <class T>
+std::string Name(Sequential<T>*) {
+  return "Sequential";
+}
+
+template <class T, int P>
+std::string Name(AlmostSequential<T, P>*) {
+  return absl::StrCat("AlmostSeq_", P);
+}
+
+template <class T>
+std::string Name(Random<T, Uniform>*) {
+  return "UnifRand";
+}
+
+template <class T>
+std::string Name(Random<T, Gaussian>*) {
+  return "GausRand";
+}
+
+template <class T>
+std::string Name(Random<T, Zipf>*) {
+  return "ZipfRand";
+}
+
+template <typename T>
+std::string Name() {
+  return Name(static_cast<T*>(nullptr));
+}
+
+constexpr int kNameWidth = 15;
+constexpr int kDistWidth = 16;
+
+bool CanRunBenchmark(absl::string_view name) {
+  static std::regex* const filter = []() -> std::regex* {
+    return benchmarks.empty() || benchmarks == "all"
+               ? nullptr
+               : new std::regex(std::string(benchmarks));
+  }();
+  return filter == nullptr || std::regex_search(std::string(name), *filter);
+}
+
+struct Result {
+  std::string name;
+  std::string dist_name;
+  Ratios ratios;
+};
+
+template <typename T, typename Dist>
+void RunForTypeAndDistribution(std::vector<Result>& results) {
+  std::string name = absl::StrCat(Name<T>(), "/", Name<Dist>());
+  // We have to check against all three names (min/avg/max) before we run it.
+  // If any of them is enabled, we run it.
+  if (!CanRunBenchmark(absl::StrCat(name, "/min")) &&
+      !CanRunBenchmark(absl::StrCat(name, "/avg")) &&
+      !CanRunBenchmark(absl::StrCat(name, "/max"))) {
+    return;
+  }
+  results.push_back({Name<T>(), Name<Dist>(), CollectMeanProbeLengths<Dist>()});
+}
+
+template <class T>
+void RunForType(std::vector<Result>& results) {
+  RunForTypeAndDistribution<T, Sequential<T>>(results);
+  RunForTypeAndDistribution<T, AlmostSequential<T, 20>>(results);
+  RunForTypeAndDistribution<T, AlmostSequential<T, 50>>(results);
+  RunForTypeAndDistribution<T, Random<T, Uniform>>(results);
+#ifdef NDEBUG
+  // Disable these in non-opt mode because they take too long.
+  RunForTypeAndDistribution<T, Random<T, Gaussian>>(results);
+  RunForTypeAndDistribution<T, Random<T, Zipf>>(results);
+#endif  // NDEBUG
+}
+
+}  // namespace
+
+int main(int argc, char** argv) {
+  // Parse the benchmark flags. Ignore all of them except the regex pattern.
+  for (int i = 1; i < argc; ++i) {
+    absl::string_view arg = argv[i];
+    const auto next = [&] { return argv[std::min(i + 1, argc - 1)]; };
+
+    if (absl::ConsumePrefix(&arg, "--benchmark_filter")) {
+      if (arg == "") {
+        // --benchmark_filter X
+        benchmarks = next();
+      } else if (absl::ConsumePrefix(&arg, "=")) {
+        // --benchmark_filter=X
+        benchmarks = arg;
+      }
+    }
+
+    // Any --benchmark flag turns on the mode.
+    if (absl::ConsumePrefix(&arg, "--benchmark")) {
+      if (benchmarks.empty()) benchmarks = "all";
+    }
+  }
+
+  std::vector<Result> results;
+  RunForType<uint64_t>(results);
+  RunForType<IntIdentity>(results);
+  RunForType<Ptr<8>*>(results);
+  RunForType<Ptr<16>*>(results);
+  RunForType<Ptr<32>*>(results);
+  RunForType<Ptr<64>*>(results);
+  RunForType<PtrIdentity<8>>(results);
+  RunForType<PtrIdentity<16>>(results);
+  RunForType<PtrIdentity<32>>(results);
+  RunForType<PtrIdentity<64>>(results);
+  RunForType<std::pair<uint32_t, uint32_t>>(results);
+  RunForType<String<true>>(results);
+  RunForType<String<false>>(results);
+  RunForType<std::pair<uint64_t, String<true>>>(results);
+  RunForType<std::pair<String<true>, uint64_t>>(results);
+  RunForType<std::pair<uint64_t, String<false>>>(results);
+  RunForType<std::pair<String<false>, uint64_t>>(results);
+
+  switch (output()) {
+    case OutputStyle::kRegular:
+      absl::PrintF("%-*s%-*s       Min       Avg       Max\n%s\n", kNameWidth,
+                   "Type", kDistWidth, "Distribution",
+                   std::string(kNameWidth + kDistWidth + 10 * 3, '-'));
+      for (const auto& result : results) {
+        absl::PrintF("%-*s%-*s  %8.4f  %8.4f  %8.4f\n", kNameWidth, result.name,
+                     kDistWidth, result.dist_name, result.ratios.min_load,
+                     result.ratios.avg_load, result.ratios.max_load);
+      }
+      break;
+    case OutputStyle::kBenchmark: {
+      absl::PrintF("{\n");
+      absl::PrintF("  \"benchmarks\": [\n");
+      absl::string_view comma;
+      for (const auto& result : results) {
+        auto print = [&](absl::string_view stat, double Ratios::*val) {
+          std::string name =
+              absl::StrCat(result.name, "/", result.dist_name, "/", stat);
+          // Check the regex again. We might have enabled only one of the
+          // stats for the benchmark.
+          if (!CanRunBenchmark(name)) return;
+          absl::PrintF("    %s{\n", comma);
+          absl::PrintF("      \"cpu_time\": %f,\n", 1e9 * result.ratios.*val);
+          absl::PrintF("      \"real_time\": %f,\n", 1e9 * result.ratios.*val);
+          absl::PrintF("      \"iterations\": 1,\n");
+          absl::PrintF("      \"name\": \"%s\",\n", name);
+          absl::PrintF("      \"time_unit\": \"ns\"\n");
+          absl::PrintF("    }\n");
+          comma = ",";
+        };
+        print("min", &Ratios::min_load);
+        print("avg", &Ratios::avg_load);
+        print("max", &Ratios::max_load);
+      }
+      absl::PrintF("  ],\n");
+      absl::PrintF("  \"context\": {\n");
+      absl::PrintF("  }\n");
+      absl::PrintF("}\n");
+      break;
+    }
+  }
+
+  return 0;
+}
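The harness above derives its numbers from GetHashtableDebugProbeSummary. For orientation, a minimal sketch of the same measurement on an ordinary swisstable container, without the randomized allocator or output plumbing; it assumes only the hashtable_debug.h helper already used above and is not part of this change:

#include <cstdio>

#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/hashtable_debug.h"

int main() {
  absl::flat_hash_set<int> s;
  for (int i = 0; i < 10000; ++i) s.insert(i);
  const auto summary =
      absl::container_internal::GetHashtableDebugProbeSummary(s);
  // Mean number of probes per present key at the current load factor.
  std::printf("size=%zu mean_probe_length=%.4f\n", s.size(), summary.mean);
  return 0;
}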
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_test.cc b/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
index f5ae83c..242a97c 100644
--- a/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
+++ b/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
@@ -14,25 +14,41 @@
 
 #include "absl/container/internal/raw_hash_set.h"
 
+#include <algorithm>
+#include <atomic>
 #include <cmath>
+#include <cstddef>
 #include <cstdint>
 #include <deque>
 #include <functional>
+#include <iostream>
+#include <iterator>
+#include <list>
+#include <map>
 #include <memory>
 #include <numeric>
+#include <ostream>
 #include <random>
 #include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/base/prefetch.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"
 #include "absl/container/internal/hash_policy_testing.h"
 #include "absl/container/internal/hashtable_debug.h"
+#include "absl/log/log.h"
 #include "absl/strings/string_view.h"
 
 namespace absl {
@@ -41,21 +57,27 @@
 
 struct RawHashSetTestOnlyAccess {
   template <typename C>
-  static auto GetSlots(const C& c) -> decltype(c.slots_) {
-    return c.slots_;
+  static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
+    return c.slot_array();
+  }
+  template <typename C>
+  static size_t CountTombstones(const C& c) {
+    return c.common().TombstonesCount();
   }
 };
 
 namespace {
 
-using ::testing::DoubleNear;
 using ::testing::ElementsAre;
+using ::testing::Eq;
 using ::testing::Ge;
 using ::testing::Lt;
-using ::testing::Optional;
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;
 
+// Convenience function to static cast to ctrl_t.
+ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
+
 TEST(Util, NormalizeCapacity) {
   EXPECT_EQ(1, NormalizeCapacity(0));
   EXPECT_EQ(1, NormalizeCapacity(1));
@@ -75,8 +97,14 @@
   for (size_t growth = 0; growth < 10000; ++growth) {
     SCOPED_TRACE(growth);
     size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
-    // The capacity is large enough for `growth`
+    // The capacity is large enough for `growth`.
     EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+    // For (capacity+1) < kWidth, growth should equal capacity.
+    if (capacity + 1 < Group::kWidth) {
+      EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
+    } else {
+      EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
+    }
     if (growth != 0 && capacity > 1) {
       // There is no smaller capacity that works.
       EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
@@ -162,15 +190,19 @@
 
 TEST(Group, Match) {
   if (Group::kWidth == 16) {
-    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
-                      7,      5, 3,        1, 1,      1, 1,         1};
+    ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
+                      ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+                      CtrlT(7),       CtrlT(5), CtrlT(3),          CtrlT(1),
+                      CtrlT(1),       CtrlT(1), CtrlT(1),          CtrlT(1)};
     EXPECT_THAT(Group{group}.Match(0), ElementsAre());
     EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
     EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
     EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
     EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
   } else if (Group::kWidth == 8) {
-    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
+                      ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
+                      ctrl_t::kSentinel, CtrlT(1)};
     EXPECT_THAT(Group{group}.Match(0), ElementsAre());
     EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
     EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
@@ -179,27 +211,39 @@
   }
 }
 
-TEST(Group, MatchEmpty) {
+TEST(Group, MaskEmpty) {
   if (Group::kWidth == 16) {
-    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
-                      7,      5, 3,        1, 1,      1, 1,         1};
-    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+    ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
+                      ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+                      CtrlT(7),       CtrlT(5), CtrlT(3),          CtrlT(1),
+                      CtrlT(1),       CtrlT(1), CtrlT(1),          CtrlT(1)};
+    EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
+    EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
   } else if (Group::kWidth == 8) {
-    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
-    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
+                      ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
+                      ctrl_t::kSentinel, CtrlT(1)};
+    EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
+    EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
   } else {
     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
   }
 }
 
-TEST(Group, MatchEmptyOrDeleted) {
+TEST(Group, MaskEmptyOrDeleted) {
   if (Group::kWidth == 16) {
-    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
-                      7,      5, 3,        1, 1,      1, 1,         1};
-    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+    ctrl_t group[] = {ctrl_t::kEmpty,   CtrlT(1), ctrl_t::kEmpty,    CtrlT(3),
+                      ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+                      CtrlT(7),         CtrlT(5), CtrlT(3),          CtrlT(1),
+                      CtrlT(1),         CtrlT(1), CtrlT(1),          CtrlT(1)};
+    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
+    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
   } else if (Group::kWidth == 8) {
-    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
-    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
+                      ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
+                      ctrl_t::kSentinel, CtrlT(1)};
+    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
+    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
   } else {
     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
   }
@@ -209,28 +253,32 @@
   constexpr size_t kCapacity = 63;
   constexpr size_t kGroupWidth = container_internal::Group::kWidth;
   std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
-  ctrl[kCapacity] = kSentinel;
-  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+  ctrl[kCapacity] = ctrl_t::kSentinel;
+  std::vector<ctrl_t> pattern = {
+      ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
+      ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
   for (size_t i = 0; i != kCapacity; ++i) {
     ctrl[i] = pattern[i % pattern.size()];
     if (i < kGroupWidth - 1)
       ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
   }
   ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
-  ASSERT_EQ(ctrl[kCapacity], kSentinel);
-  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+  ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
+  for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
     ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
-    if (i == kCapacity) expected = kSentinel;
-    if (expected == kDeleted) expected = kEmpty;
-    if (IsFull(expected)) expected = kDeleted;
+    if (i == kCapacity) expected = ctrl_t::kSentinel;
+    if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
+    if (IsFull(expected)) expected = ctrl_t::kDeleted;
     EXPECT_EQ(ctrl[i], expected)
-        << i << " " << int{pattern[i % pattern.size()]};
+        << i << " " << static_cast<int>(pattern[i % pattern.size()]);
   }
 }
 
 TEST(Group, CountLeadingEmptyOrDeleted) {
-  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
-  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+  const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
+  const std::vector<ctrl_t> full_examples = {
+      CtrlT(0), CtrlT(1), CtrlT(2),   CtrlT(3),
+      CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
 
   for (ctrl_t empty : empty_examples) {
     std::vector<ctrl_t> e(Group::kWidth, empty);
@@ -250,25 +298,44 @@
   }
 }
 
-struct IntPolicy {
-  using slot_type = int64_t;
-  using key_type = int64_t;
-  using init_type = int64_t;
+template <class T>
+struct ValuePolicy {
+  using slot_type = T;
+  using key_type = T;
+  using init_type = T;
 
-  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
-  static void destroy(void*, int64_t*) {}
-  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
-    *new_slot = *old_slot;
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    absl::allocator_traits<Allocator>::construct(*alloc, slot,
+                                                 std::forward<Args>(args)...);
   }
 
-  static int64_t& element(slot_type* slot) { return *slot; }
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+  }
 
-  template <class F>
-  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
-    return std::forward<F>(f)(x, x);
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(*old_slot));
+    destroy(alloc, old_slot);
+  }
+
+  static T& element(slot_type* slot) { return *slot; }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposeValue(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposeValue(
+        std::forward<F>(f), std::forward<Args>(args)...);
   }
 };
 
+using IntPolicy = ValuePolicy<int64_t>;
+using Uint8Policy = ValuePolicy<uint8_t>;
+
 class StringPolicy {
   template <class F, class K, class V,
             class = typename std::enable_if<
@@ -288,7 +355,7 @@
     struct ctor {};
 
     template <class... Ts>
-    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+    explicit slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
 
     std::pair<std::string, std::string> pair;
   };
@@ -337,7 +404,7 @@
 struct StringTable
     : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
   using Base = typename StringTable::raw_hash_set;
-  StringTable() {}
+  StringTable() = default;
   using Base::Base;
 };
 
@@ -348,12 +415,19 @@
   using Base::Base;
 };
 
+struct Uint8Table
+    : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+                   std::equal_to<uint8_t>, std::allocator<uint8_t>> {
+  using Base = typename Uint8Table::raw_hash_set;
+  using Base::Base;
+};
+
 template <typename T>
 struct CustomAlloc : std::allocator<T> {
-  CustomAlloc() {}
+  CustomAlloc() = default;
 
   template <typename U>
-  CustomAlloc(const CustomAlloc<U>& other) {}
+  explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
 
   template<class U> struct rebind {
     using other = CustomAlloc<U>;
@@ -367,6 +441,63 @@
   using Base::Base;
 };
 
+// Tries to allocate memory at the minimum alignment even when the default
+// allocator uses a higher alignment.
+template <typename T>
+struct MinimumAlignmentAlloc : std::allocator<T> {
+  MinimumAlignmentAlloc() = default;
+
+  template <typename U>
+  explicit MinimumAlignmentAlloc(const MinimumAlignmentAlloc<U>& /*other*/) {}
+
+  template <class U>
+  struct rebind {
+    using other = MinimumAlignmentAlloc<U>;
+  };
+
+  T* allocate(size_t n) {
+    T* ptr = std::allocator<T>::allocate(n + 1);
+    char* cptr = reinterpret_cast<char*>(ptr);
+    cptr += alignof(T);
+    return reinterpret_cast<T*>(cptr);
+  }
+
+  void deallocate(T* ptr, size_t n) {
+    char* cptr = reinterpret_cast<char*>(ptr);
+    cptr -= alignof(T);
+    std::allocator<T>::deallocate(reinterpret_cast<T*>(cptr), n + 1);
+  }
+};
+
+struct MinimumAlignmentUint8Table
+    : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+                   std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
+  using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
+  using Base::Base;
+};
+
+// Allows for freezing the allocator to expect no further allocations.
+template <typename T>
+struct FreezableAlloc : std::allocator<T> {
+  explicit FreezableAlloc(bool* f) : frozen(f) {}
+
+  template <typename U>
+  explicit FreezableAlloc(const FreezableAlloc<U>& other)
+      : frozen(other.frozen) {}
+
+  template <class U>
+  struct rebind {
+    using other = FreezableAlloc<U>;
+  };
+
+  T* allocate(size_t n) {
+    EXPECT_FALSE(*frozen);
+    return std::allocator<T>::allocate(n);
+  }
+
+  bool* frozen;
+};
+
 struct BadFastHash {
   template <class T>
   size_t operator()(const T&) const {
@@ -374,10 +505,17 @@
   }
 };
 
+struct BadHashFreezableIntTable
+    : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
+                   FreezableAlloc<int64_t>> {
+  using Base = typename BadHashFreezableIntTable::raw_hash_set;
+  using Base::Base;
+};
+
 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
                                std::allocator<int>> {
   using Base = typename BadTable::raw_hash_set;
-  BadTable() {}
+  BadTable() = default;
   using Base::Base;
 };
 
@@ -386,12 +524,17 @@
   static_assert(std::is_empty<std::allocator<int>>::value, "");
 
   struct MockTable {
+    void* infoz;
     void* ctrl;
     void* slots;
     size_t size;
     size_t capacity;
-    size_t growth_left;
-    void* infoz;
+  };
+  struct MockTableInfozDisabled {
+    void* ctrl;
+    void* slots;
+    size_t size;
+    size_t capacity;
   };
   struct StatelessHash {
     size_t operator()(absl::string_view) const { return 0; }
@@ -400,14 +543,35 @@
     size_t dummy;
   };
 
+  struct GenerationData {
+    size_t reserved_growth;
+    size_t reservation_size;
+    GenerationType* generation;
+  };
+
+// Ignore unreachable-code warning. Compiler thinks one branch of each ternary
+// conditional is unreachable.
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunreachable-code"
+#endif
+  constexpr size_t mock_size = std::is_empty<HashtablezInfoHandle>()
+                                   ? sizeof(MockTableInfozDisabled)
+                                   : sizeof(MockTable);
+  constexpr size_t generation_size =
+      SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
   EXPECT_EQ(
-      sizeof(MockTable),
+      mock_size + generation_size,
       sizeof(
           raw_hash_set<StringPolicy, StatelessHash,
                        std::equal_to<absl::string_view>, std::allocator<int>>));
 
   EXPECT_EQ(
-      sizeof(MockTable) + sizeof(StatefulHash),
+      mock_size + sizeof(StatefulHash) + generation_size,
       sizeof(
           raw_hash_set<StringPolicy, StatefulHash,
                        std::equal_to<absl::string_view>, std::allocator<int>>));
@@ -498,6 +662,37 @@
   EXPECT_TRUE(t.empty());
 }
 
+TEST(Table, InsertWithinCapacity) {
+  IntTable t;
+  t.reserve(10);
+  const size_t original_capacity = t.capacity();
+  const auto addr = [&](int i) {
+    return reinterpret_cast<uintptr_t>(&*t.find(i));
+  };
+  // Inserting an element does not change capacity.
+  t.insert(0);
+  EXPECT_THAT(t.capacity(), original_capacity);
+  const uintptr_t original_addr_0 = addr(0);
+  // Inserting another element does not rehash.
+  t.insert(1);
+  EXPECT_THAT(t.capacity(), original_capacity);
+  EXPECT_THAT(addr(0), original_addr_0);
+  // Inserting lots of duplicate elements does not rehash.
+  for (int i = 0; i < 100; ++i) {
+    t.insert(i % 10);
+  }
+  EXPECT_THAT(t.capacity(), original_capacity);
+  EXPECT_THAT(addr(0), original_addr_0);
+  // Inserting a range of duplicate elements does not rehash.
+  std::vector<int> dup_range;
+  for (int i = 0; i < 100; ++i) {
+    dup_range.push_back(i % 10);
+  }
+  t.insert(dup_range.begin(), dup_range.end());
+  EXPECT_THAT(t.capacity(), original_capacity);
+  EXPECT_THAT(addr(0), original_addr_0);
+}
+
 TEST(Table, LazyEmplace) {
   StringTable t;
   bool called = false;
@@ -545,28 +740,53 @@
 }
 
 int decompose_constructed;
+int decompose_copy_constructed;
+int decompose_copy_assigned;
+int decompose_move_constructed;
+int decompose_move_assigned;
 struct DecomposeType {
-  DecomposeType(int i) : i(i) {  // NOLINT
+  DecomposeType(int i = 0) : i(i) {  // NOLINT
     ++decompose_constructed;
   }
 
   explicit DecomposeType(const char* d) : DecomposeType(*d) {}
 
+  DecomposeType(const DecomposeType& other) : i(other.i) {
+    ++decompose_copy_constructed;
+  }
+  DecomposeType& operator=(const DecomposeType& other) {
+    ++decompose_copy_assigned;
+    i = other.i;
+    return *this;
+  }
+  DecomposeType(DecomposeType&& other) : i(other.i) {
+    ++decompose_move_constructed;
+  }
+  DecomposeType& operator=(DecomposeType&& other) {
+    ++decompose_move_assigned;
+    i = other.i;
+    return *this;
+  }
+
   int i;
 };
 
 struct DecomposeHash {
   using is_transparent = void;
-  size_t operator()(DecomposeType a) const { return a.i; }
+  size_t operator()(const DecomposeType& a) const { return a.i; }
   size_t operator()(int a) const { return a; }
   size_t operator()(const char* a) const { return *a; }
 };
 
 struct DecomposeEq {
   using is_transparent = void;
-  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
-  bool operator()(DecomposeType a, int b) const { return a.i == b; }
-  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+  bool operator()(const DecomposeType& a, const DecomposeType& b) const {
+    return a.i == b.i;
+  }
+  bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
+  bool operator()(const DecomposeType& a, const char* b) const {
+    return a.i == *b;
+  }
 };
 
 struct DecomposePolicy {
@@ -576,9 +796,9 @@
 
   template <typename T>
   static void construct(void*, DecomposeType* slot, T&& v) {
-    *slot = DecomposeType(std::forward<T>(v));
+    ::new (slot) DecomposeType(std::forward<T>(v));
   }
-  static void destroy(void*, DecomposeType*) {}
+  static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
   static DecomposeType& element(slot_type* slot) { return *slot; }
 
   template <class F, class T>
@@ -593,8 +813,13 @@
   const int one = 1;
   const char* three_p = "3";
   const auto& three = three_p;
+  const int elem_vector_count = 256;
+  std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
+  std::iota(elem_vector.begin(), elem_vector.end(), 0);
 
-  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+  using DecomposeSet =
+      raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
+  DecomposeSet set1;
 
   decompose_constructed = 0;
   int expected_constructed = 0;
@@ -652,20 +877,76 @@
     expected_constructed += construct_three;
     EXPECT_EQ(expected_constructed, decompose_constructed);
   }
+
+  decompose_copy_constructed = 0;
+  decompose_copy_assigned = 0;
+  decompose_move_constructed = 0;
+  decompose_move_assigned = 0;
+  int expected_copy_constructed = 0;
+  int expected_move_constructed = 0;
+  {  // raw_hash_set(first, last) with random-access iterators
+    DecomposeSet set2(elem_vector.begin(), elem_vector.end());
+    // Expect exactly one copy-constructor call for each element if no
+    // rehashing is done.
+    expected_copy_constructed += elem_vector_count;
+    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+    EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
+    EXPECT_EQ(0, decompose_move_assigned);
+    EXPECT_EQ(0, decompose_copy_assigned);
+  }
+
+  {  // raw_hash_set(first, last) with forward iterators
+    std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
+    expected_copy_constructed = decompose_copy_constructed;
+    DecomposeSet set2(elem_list.begin(), elem_list.end());
+    // Expect exactly N elements copied into set, expect at most 2*N elements
+    // moving internally for all resizing needed (for a growth factor of 2).
+    expected_copy_constructed += elem_vector_count;
+    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+    expected_move_constructed += elem_vector_count;
+    EXPECT_LT(expected_move_constructed, decompose_move_constructed);
+    expected_move_constructed += elem_vector_count;
+    EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+    EXPECT_EQ(0, decompose_move_assigned);
+    EXPECT_EQ(0, decompose_copy_assigned);
+    expected_copy_constructed = decompose_copy_constructed;
+    expected_move_constructed = decompose_move_constructed;
+  }
+
+  {  // insert(first, last)
+    DecomposeSet set2;
+    set2.insert(elem_vector.begin(), elem_vector.end());
+    // Expect exactly N elements copied into set, expect at most 2*N elements
+    // moving internally for all resizing needed (for a growth factor of 2).
+    const int expected_new_elements = elem_vector_count;
+    const int expected_max_element_moves = 2 * elem_vector_count;
+    expected_copy_constructed += expected_new_elements;
+    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+    expected_move_constructed += expected_max_element_moves;
+    EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+    EXPECT_EQ(0, decompose_move_assigned);
+    EXPECT_EQ(0, decompose_copy_assigned);
+    expected_copy_constructed = decompose_copy_constructed;
+    expected_move_constructed = decompose_move_constructed;
+  }
 }
 
 TEST(Table, Decompose) {
+  if (SwisstableGenerationsEnabled()) {
+    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+  }
+
   TestDecompose<DecomposeHash, DecomposeEq>(false);
 
   struct TransparentHashIntOverload {
-    size_t operator()(DecomposeType a) const { return a.i; }
+    size_t operator()(const DecomposeType& a) const { return a.i; }
     size_t operator()(int a) const { return a; }
   };
   struct TransparentEqIntOverload {
-    bool operator()(DecomposeType a, DecomposeType b) const {
+    bool operator()(const DecomposeType& a, const DecomposeType& b) const {
       return a.i == b.i;
     }
-    bool operator()(DecomposeType a, int b) const { return a.i == b; }
+    bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
   };
   TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
   TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
@@ -693,6 +974,10 @@
 
 // Test that rehash with no resize happens in case of many deleted slots.
 TEST(Table, RehashWithNoResize) {
+  if (SwisstableGenerationsEnabled()) {
+    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+  }
+
   Modulo1000HashTable t;
   // Adding the same length (and the same hash) strings
   // to have at least kMinFullGroups groups
@@ -707,7 +992,7 @@
   const size_t capacity = t.capacity();
 
   // Remove elements from all groups except the first and the last one.
-  // All elements removed from full groups will be marked as kDeleted.
+  // All elements removed from full groups will be marked as ctrl_t::kDeleted.
   const size_t erase_begin = Group::kWidth / 2;
   const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
   for (size_t i = erase_begin; i < erase_end; ++i) {
@@ -786,6 +1071,10 @@
 }
 
 TEST(Table, ClearBug) {
+  if (SwisstableGenerationsEnabled()) {
+    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+  }
+
   IntTable t;
   constexpr size_t capacity = container_internal::Group::kWidth - 1;
   constexpr size_t max_size = capacity / 2 + 1;
@@ -804,7 +1093,7 @@
   // We are checking that original and second are close enough to each other
   // that they are probably still in the same group.  This is not strictly
   // guaranteed.
-  EXPECT_LT(std::abs(original - second),
+  EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
             capacity * sizeof(IntTable::value_type));
 }
 
@@ -838,6 +1127,14 @@
   EXPECT_EQ(num_erase_calls, kNumElements);
 }
 
+TEST(Table, EraseBeginEnd) {
+  IntTable t;
+  for (int i = 0; i < 10; ++i) t.insert(i);
+  EXPECT_EQ(t.size(), 10);
+  t.erase(t.begin(), t.end());
+  EXPECT_EQ(t.size(), 0);
+}
+
 // Collect N bad keys by the following algorithm:
 // 1. Create an empty table and reserve it to 2 * N.
 // 2. Insert N random elements.
@@ -847,7 +1144,8 @@
 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
   static constexpr int kGroupSize = Group::kWidth - 1;
 
-  auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
     for (size_t i = b; i != e; ++i) {
       t->emplace(i);
     }
@@ -880,19 +1178,6 @@
   // Ratios total_probe_length/size for every tested table.
   std::vector<double> single_table_ratios;
 
-  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
-    ProbeStats res = a;
-    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
-                                             b.all_probes_histogram.size()));
-    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
-                   res.all_probes_histogram.begin(),
-                   res.all_probes_histogram.begin(), std::plus<size_t>());
-    res.single_table_ratios.insert(res.single_table_ratios.end(),
-                                   b.single_table_ratios.begin(),
-                                   b.single_table_ratios.end());
-    return res;
-  }
-
   // Average ratio total_probe_length/size over tables.
   double AvgRatio() const {
     return std::accumulate(single_table_ratios.begin(),
@@ -1001,8 +1286,8 @@
 // 1. Create new table and reserve it to keys.size() * 2
 // 2. Insert all keys xored with seed
 // 3. Collect ProbeStats from final table.
-ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
-                                                size_t num_iters) {
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
   const size_t reserve_size = keys.size() * 2;
 
   ProbeStats stats;
@@ -1060,7 +1345,7 @@
     case 16:
       if (kRandomizesInserts) {
         return {0.1,
-                1.0,
+                2.0,
                 {{0.95, 0.1}},
                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
       } else {
@@ -1070,10 +1355,11 @@
                 {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
       }
   }
-  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  LOG(FATAL) << "Unknown Group width";
   return {};
 }
 
+// TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
   ProbeStatsPerSize stats;
   std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1085,6 +1371,7 @@
   for (size_t size : sizes) {
     auto& stat = stats[size];
     VerifyStats(size, expected, stat);
+    LOG(INFO) << size << " " << stat;
   }
 }
 
@@ -1146,17 +1433,17 @@
                 {{0.95, 0.3}},
                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
       } else {
-        return {0.15,
-                0.5,
-                {{0.95, 0.3}},
-                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+        return {0.4,
+                0.6,
+                {{0.95, 0.5}},
+                {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
       }
     case 16:
       if (kRandomizesInserts) {
         return {0.1,
                 0.4,
                 {{0.95, 0.3}},
-                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+                {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
       } else {
         return {0.05,
                 0.2,
@@ -1164,10 +1451,11 @@
                 {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
       }
   }
-  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  LOG(FATAL) << "Unknown Group width";
   return {};
 }
 
+// TODO(b/80415403): Figure out why this test is so flaky.
 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
   ProbeStatsPerSize stats;
   std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1179,6 +1467,7 @@
   for (size_t size : sizes) {
     auto& stat = stats[size];
     VerifyStats(size, expected, stat);
+    LOG(INFO) << size << " " << stat;
   }
 }
 
@@ -1313,7 +1602,7 @@
 TEST(Table, ConstructFromInitList) {
   using P = std::pair<std::string, std::string>;
   struct Q {
-    operator P() const { return {}; }
+    operator P() const { return {}; }  // NOLINT
   };
   StringTable t = {P(), Q(), {}, {{}, {}}};
 }
@@ -1351,7 +1640,7 @@
 struct ExplicitAllocIntTable
     : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                    std::equal_to<int64_t>, Alloc<int64_t>> {
-  ExplicitAllocIntTable() {}
+  ExplicitAllocIntTable() = default;
 };
 
 TEST(Table, AllocWithExplicitCtor) {
@@ -1618,7 +1907,6 @@
   EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
 }
 
-// TODO(alkis): Expand iterator tests.
 TEST(Iterator, IsDefaultConstructible) {
   StringTable::iterator i;
   EXPECT_TRUE(i == StringTable::iterator());
@@ -1656,6 +1944,38 @@
   EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
 }
 
+TEST(Table, IteratorEmplaceConstructibleRequirement) {
+  struct Value {
+    explicit Value(absl::string_view view) : value(view) {}
+    std::string value;
+
+    bool operator==(const Value& other) const { return value == other.value; }
+  };
+  struct H {
+    size_t operator()(const Value& v) const {
+      return absl::Hash<std::string>{}(v.value);
+    }
+  };
+
+  struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
+                              std::allocator<Value>> {
+    using Base = typename Table::raw_hash_set;
+    using Base::Base;
+  };
+
+  std::string input[3]{"A", "B", "C"};
+
+  Table t(std::begin(input), std::end(input));
+  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
+
+  input[0] = "D";
+  input[1] = "E";
+  input[2] = "F";
+  t.insert(std::begin(input), std::end(input));
+  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
+                                      Value{"D"}, Value{"E"}, Value{"F"}));
+}
+
 TEST(Nodes, EmptyNodeType) {
   using node_type = StringTable::node_type;
   node_type n;
@@ -1707,7 +2027,27 @@
   EXPECT_FALSE(res.inserted);
   EXPECT_THAT(*res.position, Pair(k0, ""));
   EXPECT_TRUE(res.node);
-  EXPECT_FALSE(node);
+  EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
+}
+
+TEST(Nodes, HintInsert) {
+  IntTable t = {1, 2, 3};
+  auto node = t.extract(1);
+  EXPECT_THAT(t, UnorderedElementsAre(2, 3));
+  auto it = t.insert(t.begin(), std::move(node));
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  EXPECT_EQ(*it, 1);
+  EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
+
+  node = t.extract(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 3));
+  // Reinsert 2 to make the next insert fail.
+  t.insert(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  it = t.insert(t.begin(), std::move(node));
+  EXPECT_EQ(*it, 2);
+  // The node was not emptied by the insert call.
+  EXPECT_TRUE(node);  // NOLINT(bugprone-use-after-move)
 }
 
 IntTable MakeSimpleTable(size_t size) {
@@ -1780,20 +2120,99 @@
   EXPECT_NE(old_ptr, addr(0));
 }
 
-// Confirm that we assert if we try to erase() end().
-TEST(TableDeathTest, EraseOfEndAsserts) {
+bool IsAssertEnabled() {
   // Use an assert with side-effects to figure out if they are actually enabled.
   bool assert_enabled = false;
-  assert([&]() {
+  assert([&]() {  // NOLINT
     assert_enabled = true;
     return true;
   }());
-  if (!assert_enabled) return;
+  return assert_enabled;
+}
+
+TEST(TableDeathTest, InvalidIteratorAsserts) {
+  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+    GTEST_SKIP() << "Assertions not enabled.";
 
   IntTable t;
   // Extra simple "regexp" as regexp support is highly varied across platforms.
-  constexpr char kDeathMsg[] = "Invalid operation on iterator";
-  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
+  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
+                            "erase.* called on end.. iterator.");
+  typename IntTable::iterator iter;
+  EXPECT_DEATH_IF_SUPPORTED(
+      ++iter, "operator.* called on default-constructed iterator.");
+  t.insert(0);
+  iter = t.begin();
+  t.erase(iter);
+  const char* const kErasedDeathMessage =
+      SwisstableGenerationsEnabled()
+          ? "operator.* called on invalid iterator.*was likely erased"
+          : "operator.* called on invalid iterator.*might have been "
+            "erased.*config=asan";
+  EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
+}
+
+// Invalid iterator use can trigger heap-use-after-free in asan,
+// use-of-uninitialized-value in msan, or invalidated iterator assertions.
+constexpr const char* kInvalidIteratorDeathMessage =
+    "heap-use-after-free|use-of-uninitialized-value|invalidated "
+    "iterator|Invalid iterator|invalid iterator";
+
+// MSVC doesn't support | in regex.
+#if defined(_MSC_VER)
+constexpr bool kMsvc = true;
+#else
+constexpr bool kMsvc = false;
+#endif
+
+TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
+  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+    GTEST_SKIP() << "Assertions not enabled.";
+
+  IntTable t;
+  t.insert(1);
+  t.insert(2);
+  t.insert(3);
+  auto iter1 = t.begin();
+  auto iter2 = std::next(iter1);
+  ASSERT_NE(iter1, t.end());
+  ASSERT_NE(iter2, t.end());
+  t.erase(iter1);
+  // Extra simple "regexp" as regexp support is highly varied across platforms.
+  const char* const kErasedDeathMessage =
+      SwisstableGenerationsEnabled()
+          ? "Invalid iterator comparison.*was likely erased"
+          : "Invalid iterator comparison.*might have been erased.*config=asan";
+  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
+  EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
+  t.erase(iter2);
+  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
+
+  IntTable t1, t2;
+  t1.insert(0);
+  t2.insert(0);
+  iter1 = t1.begin();
+  iter2 = t2.begin();
+  const char* const kContainerDiffDeathMessage =
+      SwisstableGenerationsEnabled()
+          ? "Invalid iterator comparison.*iterators from different hashtables"
+          : "Invalid iterator comparison.*may be from different "
+            ".*containers.*config=asan";
+  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
+  EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
+
+  for (int i = 0; i < 10; ++i) t1.insert(i);
+  // There should have been a rehash in t1.
+  if (kMsvc) return;  // MSVC doesn't support | in regex.
+
+  // NOTE(b/293887834): After rehashing, iterators will contain pointers to
+  // freed memory, which may be detected by ThreadSanitizer.
+  const char* const kRehashedDeathMessage =
+      SwisstableGenerationsEnabled()
+          ? kInvalidIteratorDeathMessage
+          : "Invalid iterator comparison.*might have rehashed.*config=asan"
+            "|ThreadSanitizer: heap-use-after-free";
+  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
 }
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -1802,20 +2221,62 @@
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
 
-  auto& sampler = HashtablezSampler::Global();
+  auto& sampler = GlobalHashtablezSampler();
   size_t start_size = 0;
-  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+  absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
+  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
+    preexisting_info.insert(&info);
+    ++start_size;
+  });
 
   std::vector<IntTable> tables;
   for (int i = 0; i < 1000000; ++i) {
     tables.emplace_back();
+
+    const bool do_reserve = (i % 10 > 5);
+    const bool do_rehash = !do_reserve && (i % 10 > 0);
+
+    if (do_reserve) {
+      // Don't reserve on all tables.
+      tables.back().reserve(10 * (i % 10));
+    }
+
     tables.back().insert(1);
+    tables.back().insert(i % 5);
+
+    if (do_rehash) {
+      // Rehash some other tables.
+      tables.back().rehash(10 * (i % 10));
+    }
   }
   size_t end_size = 0;
-  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+  absl::flat_hash_map<size_t, int> observed_checksums;
+  absl::flat_hash_map<ssize_t, int> reservations;
+  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
+    if (preexisting_info.count(&info) == 0) {
+      observed_checksums[info.hashes_bitwise_xor.load(
+          std::memory_order_relaxed)]++;
+      reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
+    }
+    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
+    ++end_size;
+  });
 
   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
               0.01, 0.005);
+  EXPECT_EQ(observed_checksums.size(), 5);
+  for (const auto& [_, count] : observed_checksums) {
+    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
+  }
+
+  EXPECT_EQ(reservations.size(), 10);
+  for (const auto& [reservation, count] : reservations) {
+    EXPECT_GE(reservation, 0);
+    EXPECT_LT(reservation, 100);
+
+    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
+        << reservation;
+  }
 }
 #endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
 
@@ -1824,7 +2285,7 @@
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
 
-  auto& sampler = HashtablezSampler::Global();
+  auto& sampler = GlobalHashtablezSampler();
   size_t start_size = 0;
   start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
 
@@ -1866,6 +2327,189 @@
 }
 #endif  // ABSL_HAVE_ADDRESS_SANITIZER
 
+template <typename T>
+class AlignOneTest : public ::testing::Test {};
+using AlignOneTestTypes =
+    ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
+TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
+
+TYPED_TEST(AlignOneTest, AlignOne) {
+  // We previously had a bug in which we were copying a control byte over the
+  // first slot when alignof(value_type) is 1. We test repeated
+  // insertions/erases and verify that the behavior is correct.
+  TypeParam t;
+  std::unordered_set<uint8_t> verifier;  // NOLINT
+
+  // Do repeated insertions/erases from the table.
+  for (int64_t i = 0; i < 100000; ++i) {
+    SCOPED_TRACE(i);
+    const uint8_t u = (i * -i) & 0xFF;
+    auto it = t.find(u);
+    auto verifier_it = verifier.find(u);
+    if (it == t.end()) {
+      ASSERT_EQ(verifier_it, verifier.end());
+      t.insert(u);
+      verifier.insert(u);
+    } else {
+      ASSERT_NE(verifier_it, verifier.end());
+      t.erase(it);
+      verifier.erase(verifier_it);
+    }
+  }
+
+  EXPECT_EQ(t.size(), verifier.size());
+  for (uint8_t u : t) {
+    EXPECT_EQ(verifier.count(u), 1);
+  }
+}
+
+TEST(Iterator, InvalidUseCrashesWithSanitizers) {
+  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+  IntTable t;
+  // Start with 1 element so that `it` is never an end iterator.
+  t.insert(-1);
+  for (int i = 0; i < 10; ++i) {
+    auto it = t.begin();
+    t.insert(i);
+    EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+    EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
+                              kInvalidIteratorDeathMessage);
+  }
+}
+
+TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
+  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+  IntTable t;
+  t.reserve(10);
+  t.insert(0);
+  auto it = t.begin();
+  // Reserved growth can't rehash.
+  for (int i = 1; i < 10; ++i) {
+    t.insert(i);
+    EXPECT_EQ(*it, 0);
+  }
+  // ptr will become invalidated on rehash.
+  const int64_t* ptr = &*it;
+  (void)ptr;
+
+  // erase() decreases size but does not decrease reserved growth, so the next
+  // insertion still invalidates iterators.
+  t.erase(0);
+  // The first insert after reserved growth reaches 0 is guaranteed to rehash when
+  // generations are enabled.
+  t.insert(10);
+  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+  EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
+                            kInvalidIteratorDeathMessage);
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+#endif
+}
+
+TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
+  IntTable t;
+  for (int i = 0; i < 8; ++i) t.insert(i);
+  // Want to insert twice without invalidating iterators so reserve.
+  const size_t cap = t.capacity();
+  t.reserve(t.size() + 2);
+  // We want to be testing the case in which the reserve doesn't grow the table.
+  ASSERT_EQ(cap, t.capacity());
+  auto it = t.find(0);
+  t.insert(100);
+  t.insert(200);
+  // `it` shouldn't have been invalidated.
+  EXPECT_EQ(*it, 0);
+}
+
+TEST(Table, EraseBeginEndResetsReservedGrowth) {
+  bool frozen = false;
+  BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
+  t.reserve(100);
+  const size_t cap = t.capacity();
+  frozen = true;  // no further allocs allowed
+
+  for (int i = 0; i < 10; ++i) {
+    // Create a long run (hash function returns constant).
+    for (int j = 0; j < 100; ++j) t.insert(j);
+    // Erase elements from the middle of the long run, which creates tombstones.
+    for (int j = 30; j < 60; ++j) t.erase(j);
+    EXPECT_EQ(t.size(), 70);
+    EXPECT_EQ(t.capacity(), cap);
+    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
+
+    t.erase(t.begin(), t.end());
+
+    EXPECT_EQ(t.size(), 0);
+    EXPECT_EQ(t.capacity(), cap);
+    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+  }
+}
+
+TEST(Table, GenerationInfoResetsOnClear) {
+  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) t.insert(i);
+  t.reserve(t.size() + 100);
+
+  t.clear();
+
+  t.insert(0);
+  auto it = t.begin();
+  t.insert(1);
+  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+}
+
+TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
+  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+  GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
+#endif
+
+  IntTable t;
+  t.insert(0);
+  // Rehashing is guaranteed on every insertion while capacity is less than
+  // RehashProbabilityConstant().
+  int64_t i = 0;
+  while (t.capacity() <= RehashProbabilityConstant()) {
+    // ptr will become invalidated on rehash.
+    const int64_t* ptr = &*t.begin();
+    t.insert(++i);
+    EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free") << i;
+  }
+}
+
+TEST(Iterator, InvalidComparisonDifferentTables) {
+  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+
+  IntTable t1, t2;
+  IntTable::iterator default_constructed_iter;
+  // We randomly use one of N empty generations for generations from empty
+  // hashtables. In general, we won't always detect when iterators from
+  // different empty hashtables are compared, but in this test case, we
+  // should deterministically detect the error due to our randomness yielding
+  // consecutive random generations.
+  EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
+                            "Invalid iterator comparison.*empty hashtables");
+  EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
+                            "Invalid iterator comparison.*default-constructed");
+  t1.insert(0);
+  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+                            "Invalid iterator comparison.*empty hashtable");
+  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
+                            "Invalid iterator comparison.*default-constructed");
+  t2.insert(0);
+  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+                            "Invalid iterator comparison.*end.. iterator");
+  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
+                            "Invalid iterator comparison.*non-end");
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
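
The new reserved-growth and invalid-iterator tests above exercise a user-visible guarantee: after `reserve(n)`, insertions that stay within the reserved growth do not rehash, so iterators and pointers obtained in the meantime remain valid. A minimal sketch of that usage pattern, written against the public `absl::flat_hash_set` rather than the internal `IntTable` alias used by the tests (the specific sizes here are illustrative assumptions):

#include <cassert>
#include <cstdint>

#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int64_t> s;
  s.reserve(16);            // Prepare for up to 16 elements without rehashing.
  s.insert(0);
  auto it = s.find(0);      // Iterator obtained after the reserve() call.
  const int64_t* ptr = &*it;
  for (int64_t i = 1; i < 16; ++i) s.insert(i);  // Stays within reserved growth.
  // No rehash has occurred, so both the iterator and the pointer are still valid.
  assert(*it == 0 && *ptr == 0);
  return 0;
}

Inserting a 17th element, by contrast, may rehash and invalidate `it` and `ptr`, which is exactly the condition the sanitizer/generation death tests above are designed to catch.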
diff --git a/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
index 76ee95e..7e84dc2 100644
--- a/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
+++ b/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -16,6 +16,7 @@
 #define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
 
 #include <algorithm>
+#include <unordered_map>
 #include <vector>
 
 #include "gmock/gmock.h"
@@ -178,7 +179,7 @@
   A alloc(0);
   std::vector<T> values;
   std::generate_n(std::back_inserter(values), 10,
-                  hash_internal::Generator<T>());
+                  hash_internal::UniqueGenerator<T>());
   TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
   EXPECT_EQ(m.hash_function(), hasher);
   EXPECT_EQ(m.key_eq(), equal);
@@ -197,7 +198,7 @@
   A alloc(0);
   std::vector<T> values;
   std::generate_n(std::back_inserter(values), 10,
-                  hash_internal::Generator<T>());
+                  hash_internal::UniqueGenerator<T>());
   TypeParam m(values.begin(), values.end(), 123, alloc);
   EXPECT_EQ(m.get_allocator(), alloc);
   EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
@@ -220,7 +221,7 @@
   A alloc(0);
   std::vector<T> values;
   std::generate_n(std::back_inserter(values), 10,
-                  hash_internal::Generator<T>());
+                  hash_internal::UniqueGenerator<T>());
   TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
   EXPECT_EQ(m.hash_function(), hasher);
   EXPECT_EQ(m.get_allocator(), alloc);
@@ -240,8 +241,9 @@
   H hasher;
   E equal;
   A alloc(0);
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m(123, hasher, equal, alloc);
-  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  for (size_t i = 0; i != 10; ++i) m.insert(gen());
   TypeParam n(m);
   EXPECT_EQ(m.hash_function(), n.hash_function());
   EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -261,8 +263,9 @@
   H hasher;
   E equal;
   A alloc(0);
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m(123, hasher, equal, alloc);
-  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  for (size_t i = 0; i != 10; ++i) m.insert(gen());
   TypeParam n(m, A(11));
   EXPECT_EQ(m.hash_function(), n.hash_function());
   EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -284,8 +287,9 @@
   H hasher;
   E equal;
   A alloc(0);
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m(123, hasher, equal, alloc);
-  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  for (size_t i = 0; i != 10; ++i) m.insert(gen());
   TypeParam t(m);
   TypeParam n(std::move(t));
   EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -306,8 +310,9 @@
   H hasher;
   E equal;
   A alloc(0);
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m(123, hasher, equal, alloc);
-  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  for (size_t i = 0; i != 10; ++i) m.insert(gen());
   TypeParam t(m);
   TypeParam n(std::move(t), A(1));
   EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -324,7 +329,7 @@
 
 TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
   using T = hash_internal::GeneratedType<TypeParam>;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
   using H = typename TypeParam::hasher;
   using E = typename TypeParam::key_equal;
@@ -347,7 +352,7 @@
 void InitializerListBucketAllocTest(std::true_type) {
   using T = hash_internal::GeneratedType<TypeParam>;
   using A = typename TypeParam::allocator_type;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
   A alloc(0);
   TypeParam m(values, 123, alloc);
@@ -370,7 +375,7 @@
   using A = typename TypeParam::allocator_type;
   H hasher;
   A alloc(0);
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
   TypeParam m(values, 123, hasher, alloc);
   EXPECT_EQ(m.hash_function(), hasher);
@@ -391,7 +396,7 @@
   H hasher;
   E equal;
   A alloc(0);
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
   TypeParam n;
   n = m;
@@ -411,7 +416,7 @@
   H hasher;
   E equal;
   A alloc(0);
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
   TypeParam t(m);
   TypeParam n;
@@ -423,7 +428,7 @@
 
 TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
   using T = hash_internal::GeneratedType<TypeParam>;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
   TypeParam m;
   m = values;
@@ -432,7 +437,7 @@
 
 TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
   using T = hash_internal::GeneratedType<TypeParam>;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m({gen(), gen(), gen()});
   TypeParam n({gen()});
   n = m;
@@ -441,7 +446,7 @@
 
 TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
   using T = hash_internal::GeneratedType<TypeParam>;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   TypeParam m({gen(), gen(), gen()});
   TypeParam t(m);
   TypeParam n({gen()});
@@ -451,7 +456,7 @@
 
 TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
   using T = hash_internal::GeneratedType<TypeParam>;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
   TypeParam m;
   m = values;
@@ -460,7 +465,7 @@
 
 TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
   using T = hash_internal::GeneratedType<TypeParam>;
-  hash_internal::Generator<T> gen;
+  hash_internal::UniqueGenerator<T> gen;
   std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
   TypeParam m(values);
   m = *&m;  // Avoid -Wself-assign
@@ -471,7 +476,7 @@
 // containers in unspecified state (and in practice in causes memory-leak
 // according to heap-checker!).
 
-REGISTER_TYPED_TEST_CASE_P(
+REGISTER_TYPED_TEST_SUITE_P(
     ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
     BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
     InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
diff --git a/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h b/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
index e76421e..3713cd9 100644
--- a/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
+++ b/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
@@ -107,8 +107,8 @@
   }
 }
 
-REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
-                           EqualRange);
+REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find,
+                            EqualRange);
 
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
index 8c9ca77..4d9ab30 100644
--- a/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
+++ b/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
@@ -81,6 +81,38 @@
   ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
 }
 
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  m.reserve(10);
+  const size_t original_capacity = m.bucket_count();
+  m.insert(val);
+  EXPECT_EQ(m.bucket_count(), original_capacity);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  m.insert(val2);
+  EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> base_values;
+  std::generate_n(std::back_inserter(base_values), 10,
+                  hash_internal::Generator<T>());
+  std::vector<T> values;
+  while (values.size() != 100) {
+    std::copy_n(base_values.begin(), 10, std::back_inserter(values));
+  }
+  TypeParam m;
+  m.reserve(10);
+  const size_t original_capacity = m.bucket_count();
+  m.insert(values.begin(), values.end());
+  EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
 TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
 #ifdef UNORDERED_MAP_CXX17
   using std::get;
@@ -265,10 +297,12 @@
 // TODO(alkis): Write tests for extract.
 // TODO(alkis): Write tests for merge.
 
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
-                           InsertRange, InsertOrAssign, InsertOrAssignHint,
-                           Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
-                           Erase, EraseRange, EraseKey, Swap);
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+                            InsertRange, InsertWithinCapacity,
+                            InsertRangeWithinCapacity, InsertOrAssign,
+                            InsertOrAssignHint, Emplace, EmplaceHint,
+                            TryEmplace, TryEmplaceHint, Erase, EraseRange,
+                            EraseKey, Swap);
 
 template <typename Type>
 struct is_unique_ptr : std::false_type {};
diff --git a/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h b/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
index 41165b0..af1116e 100644
--- a/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
+++ b/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
@@ -478,7 +478,7 @@
   EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
 }
 
-REGISTER_TYPED_TEST_CASE_P(
+REGISTER_TYPED_TEST_SUITE_P(
     ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
     BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
     InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
diff --git a/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h b/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
index 8f2f4b2..b35f766 100644
--- a/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
+++ b/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
@@ -82,7 +82,7 @@
   }
 }
 
-REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange);
 
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
index 26be58d..d8864bb 100644
--- a/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
+++ b/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
@@ -74,6 +74,36 @@
   ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
 }
 
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  m.reserve(10);
+  const size_t original_capacity = m.bucket_count();
+  m.insert(val);
+  EXPECT_EQ(m.bucket_count(), original_capacity);
+  m.insert(val);
+  EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> base_values;
+  std::generate_n(std::back_inserter(base_values), 10,
+                  hash_internal::Generator<T>());
+  std::vector<T> values;
+  while (values.size() != 100) {
+    values.insert(values.end(), base_values.begin(), base_values.end());
+  }
+  TypeParam m;
+  m.reserve(10);
+  const size_t original_capacity = m.bucket_count();
+  m.insert(values.begin(), values.end());
+  EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
 TYPED_TEST_P(ModifiersTest, Emplace) {
   using T = hash_internal::GeneratedType<TypeParam>;
   T val = hash_internal::Generator<T>()();
@@ -179,9 +209,10 @@
 // TODO(alkis): Write tests for extract.
 // TODO(alkis): Write tests for merge.
 
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
-                           InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
-                           EraseKey, Swap);
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+                            InsertRange, InsertWithinCapacity,
+                            InsertRangeWithinCapacity, Emplace, EmplaceHint,
+                            Erase, EraseRange, EraseKey, Swap);
 
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/node_hash_map.h b/abseil-cpp/absl/container/node_hash_map.h
index 7a39f62..a396de2 100644
--- a/abseil-cpp/absl/container/node_hash_map.h
+++ b/abseil-cpp/absl/container/node_hash_map.h
@@ -41,9 +41,10 @@
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
 #include "absl/memory/memory.h"
 
@@ -77,6 +78,10 @@
 // absl/hash/hash.h for information on extending Abseil hashing to user-defined
 // types.
 //
+// Using `absl::node_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values may
+// be randomized across dynamically loaded libraries.
+//
 // Example:
 //
 //   // Create a node hash map of three strings (that map to strings)
@@ -221,7 +226,11 @@
   // iterator erase(const_iterator first, const_iterator last):
   //
   //   Erases the elements in the half-open interval [`first`, `last`), returning an
-  //   iterator pointing to `last`.
+  //   iterator pointing to `last`. The special case of calling
+  //   `erase(begin(), end())` resets the reserved growth such that if
+  //   `reserve(N)` has previously been called and there has been no intervening
+  //   call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+  //   to assume that inserting N elements will not cause a rehash.
   //
   // size_type erase(const key_type& key):
   //
@@ -347,8 +356,8 @@
   // `node_hash_map`.
   //
   //   iterator try_emplace(const_iterator hint,
-  //                        const init_type& k, Args&&... args):
-  //   iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+  //                        const key_type& k, Args&&... args):
+  //   iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
   //
   // Inserts (via copy or move) the element of the specified key into the
   // `node_hash_map` using the position of `hint` as a non-binding suggestion
@@ -399,7 +408,7 @@
   // for the past-the-end iterator, which is invalidated.
   //
   // `swap()` requires that the node hash map's hashing and key equivalence
-  // functions be Swappable, and are exchaged using unqualified calls to
+  // functions be Swappable, and are exchanged using unqualified calls to
   // non-member `swap()`. If the map's allocator has
   // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
   // set to `true`, the allocators are also exchanged using an unqualified call
@@ -525,17 +534,19 @@
 // erase_if(node_hash_map<>, Pred)
 //
 // Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
 template <typename K, typename V, typename H, typename E, typename A,
           typename Predicate>
-void erase_if(node_hash_map<K, V, H, E, A>& c, Predicate pred) {
-  container_internal::EraseIf(pred, &c);
+typename node_hash_map<K, V, H, E, A>::size_type erase_if(
+    node_hash_map<K, V, H, E, A>& c, Predicate pred) {
+  return container_internal::EraseIf(pred, &c);
 }
 
 namespace container_internal {
 
 template <class Key, class Value>
 class NodeHashMapPolicy
-    : public absl::container_internal::node_hash_policy<
+    : public absl::container_internal::node_slot_policy<
           std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
   using value_type = std::pair<const Key, Value>;
 
diff --git a/abseil-cpp/absl/container/node_hash_map_test.cc b/abseil-cpp/absl/container/node_hash_map_test.cc
index 8f59a1e..9bcf470 100644
--- a/abseil-cpp/absl/container/node_hash_map_test.cc
+++ b/abseil-cpp/absl/container/node_hash_map_test.cc
@@ -223,33 +223,36 @@
   // Erase all elements.
   {
     node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, [](std::pair<const int, int>) { return true; });
+    EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
     EXPECT_THAT(s, IsEmpty());
   }
   // Erase no elements.
   {
     node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, [](std::pair<const int, int>) { return false; });
+    EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
                                         Pair(4, 4), Pair(5, 5)));
   }
   // Erase specific elements.
   {
     node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s,
-             [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+    EXPECT_EQ(erase_if(s,
+                       [](std::pair<const int, int> kvp) {
+                         return kvp.first % 2 == 1;
+                       }),
+              3);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
   }
   // Predicate is function reference.
   {
     node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, FirstIsEven);
+    EXPECT_EQ(erase_if(s, FirstIsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
   }
   // Predicate is function pointer.
   {
     node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
-    erase_if(s, &FirstIsEven);
+    EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
   }
 }
@@ -269,6 +272,14 @@
 }
 #endif
 
+TEST(NodeHashMap, RecursiveTypeCompiles) {
+  struct RecursiveType {
+    node_hash_map<int, RecursiveType> m;
+  };
+  RecursiveType t;
+  t.m[0] = RecursiveType{};
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/container/node_hash_set.h b/abseil-cpp/absl/container/node_hash_set.h
index 56ce3b6..421ff46 100644
--- a/abseil-cpp/absl/container/node_hash_set.h
+++ b/abseil-cpp/absl/container/node_hash_set.h
@@ -18,7 +18,7 @@
 //
 // An `absl::node_hash_set<T>` is an unordered associative container designed to
 // be a more efficient replacement for `std::unordered_set`. Like
-// `unordered_set`, search, insertion, and deletion of map elements can be done
+// `unordered_set`, search, insertion, and deletion of set elements can be done
 // as an `O(1)` operation. However, `node_hash_set` (and other unordered
 // associative containers known as the collection of Abseil "Swiss tables")
 // contain other optimizations that result in both memory and computation
@@ -38,8 +38,9 @@
 #include <type_traits>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
 #include "absl/memory/memory.h"
 
@@ -60,7 +61,7 @@
 // following notable differences:
 //
 // * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-//   `insert()`, provided that the map is provided a compatible heterogeneous
+//   `insert()`, provided that the set is provided a compatible heterogeneous
 //   hashing function and equality operator.
 // * Contains a `capacity()` member function indicating the number of element
 //   slots (open, deleted, and empty) within the hash set.
@@ -73,16 +74,20 @@
 // absl/hash/hash.h for information on extending Abseil hashing to user-defined
 // types.
 //
+// Using `absl::node_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values may
+// be randomized across dynamically loaded libraries.
+//
 // Example:
 //
 //   // Create a node hash set of three strings
-//   absl::node_hash_map<std::string, std::string> ducks =
+//   absl::node_hash_set<std::string> ducks =
 //     {"huey", "dewey", "louie"};
 //
-//  // Insert a new element into the node hash map
-//  ducks.insert("donald"};
+//  // Insert a new element into the node hash set
+//  ducks.insert("donald");
 //
-//  // Force a rehash of the node hash map
+//  // Force a rehash of the node hash set
 //  ducks.rehash(0);
 //
 //  // See if "dewey" is present
@@ -100,7 +105,7 @@
  public:
   // Constructors and Assignment Operators
   //
-  // A node_hash_set supports the same overload set as `std::unordered_map`
+  // A node_hash_set supports the same overload set as `std::unordered_set`
   // for construction and assignment:
   //
   // *  Default constructor
@@ -167,7 +172,7 @@
   // available within the `node_hash_set`.
   //
   // NOTE: this member function is particular to `absl::node_hash_set` and is
-  // not provided in the `std::unordered_map` API.
+  // not provided in the `std::unordered_set` API.
   using Base::capacity;
 
   // node_hash_set::empty()
@@ -208,12 +213,16 @@
   //   `void`.
   //
   //   NOTE: this return behavior is different than that of STL containers in
-  //   general and `std::unordered_map` in particular.
+  //   general and `std::unordered_set` in particular.
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
   //   Erases the elements in the half-open interval [`first`, `last`), returning an
-  //   iterator pointing to `last`.
+  //   iterator pointing to `last`. The special case of calling
+  //   `erase(begin(), end())` resets the reserved growth such that if
+  //   `reserve(N)` has previously been called and there has been no intervening
+  //   call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+  //   to assume that inserting N elements will not cause a rehash.
   //
   // size_type erase(const key_type& key):
   //
@@ -314,7 +323,7 @@
 
   // node_hash_set::merge()
   //
-  // Extracts elements from a given `source` flat hash map into this
+  // Extracts elements from a given `source` node hash set into this
   // `node_hash_set`. If the destination `node_hash_set` already contains an
   // element with an equivalent key, that element is not extracted.
   using Base::merge;
@@ -322,15 +331,15 @@
   // node_hash_set::swap(node_hash_set& other)
   //
   // Exchanges the contents of this `node_hash_set` with those of the `other`
-  // flat hash map, avoiding invocation of any move, copy, or swap operations on
+  // node hash set, avoiding invocation of any move, copy, or swap operations on
   // individual elements.
   //
   // All iterators and references on the `node_hash_set` remain valid, excepting
   // for the past-the-end iterator, which is invalidated.
   //
-  // `swap()` requires that the flat hash set's hashing and key equivalence
-  // functions be Swappable, and are exchaged using unqualified calls to
-  // non-member `swap()`. If the map's allocator has
+  // `swap()` requires that the node hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
   // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
   // set to `true`, the allocators are also exchanged using an unqualified call
   // to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -385,14 +394,14 @@
   // node_hash_set::bucket_count()
   //
   // Returns the number of "buckets" within the `node_hash_set`. Note that
-  // because a flat hash map contains all elements within its internal storage,
+  // because a node hash set contains all elements within its internal storage,
   // this value simply equals the current capacity of the `node_hash_set`.
   using Base::bucket_count;
 
   // node_hash_set::load_factor()
   //
   // Returns the current load factor of the `node_hash_set` (the average number
-  // of slots occupied with a value within the hash map).
+  // of slots occupied with a value within the hash set).
   using Base::load_factor;
 
   // node_hash_set::max_load_factor()
@@ -433,16 +442,18 @@
 // erase_if(node_hash_set<>, Pred)
 //
 // Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
 template <typename T, typename H, typename E, typename A, typename Predicate>
-void erase_if(node_hash_set<T, H, E, A>& c, Predicate pred) {
-  container_internal::EraseIf(pred, &c);
+typename node_hash_set<T, H, E, A>::size_type erase_if(
+    node_hash_set<T, H, E, A>& c, Predicate pred) {
+  return container_internal::EraseIf(pred, &c);
 }
 
 namespace container_internal {
 
 template <class T>
 struct NodeHashSetPolicy
-    : absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
+    : absl::container_internal::node_slot_policy<T&, NodeHashSetPolicy<T>> {
   using key_type = T;
   using init_type = T;
   using constant_iterators = std::true_type;
diff --git a/abseil-cpp/absl/container/node_hash_set_test.cc b/abseil-cpp/absl/container/node_hash_set_test.cc
index 7ddad20..98a8dbd 100644
--- a/abseil-cpp/absl/container/node_hash_set_test.cc
+++ b/abseil-cpp/absl/container/node_hash_set_test.cc
@@ -108,31 +108,31 @@
   // Erase all elements.
   {
     node_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, [](int) { return true; });
+    EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
     EXPECT_THAT(s, IsEmpty());
   }
   // Erase no elements.
   {
     node_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, [](int) { return false; });
+    EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
     EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
   }
   // Erase specific elements.
   {
     node_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, [](int k) { return k % 2 == 1; });
+    EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
     EXPECT_THAT(s, UnorderedElementsAre(2, 4));
   }
   // Predicate is function reference.
   {
     node_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, IsEven);
+    EXPECT_EQ(erase_if(s, IsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
   }
   // Predicate is function pointer.
   {
     node_hash_set<int> s = {1, 2, 3, 4, 5};
-    erase_if(s, &IsEven);
+    EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
   }
 }
diff --git a/abseil-cpp/absl/container/sample_element_size_test.cc b/abseil-cpp/absl/container/sample_element_size_test.cc
new file mode 100644
index 0000000..b23626b
--- /dev/null
+++ b/abseil-cpp/absl/container/sample_element_size_test.cc
@@ -0,0 +1,114 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+// Create some tables of type `Table`, then look at all the new
+// `HashtablezInfo`s to make sure that the `inline_element_size ==
+// expected_element_size`.  The `inline_element_size` is the amount of memory
+// allocated for each slot of a hash table, that is `sizeof(slot_type)`.  Add
+// the new `HashtablezInfo`s to `preexisting_info`.  Store all the new tables
+// into `tables`.
+template <class Table>
+void TestInlineElementSize(
+    HashtablezSampler& sampler,
+    // clang-tidy gives a false positive on this declaration.  This unordered
+    // set cannot be flat_hash_set, however, since that would introduce a mutex
+    // deadlock.
+    std::unordered_set<const HashtablezInfo*>& preexisting_info,  // NOLINT
+    std::vector<Table>& tables, const typename Table::value_type& elt,
+    size_t expected_element_size) {
+  for (int i = 0; i < 10; ++i) {
+    // We create a new table and must store it somewhere so that when we store
+    // a pointer to the resulting `HashtablezInfo` into `preexisting_info`
+    // we aren't storing a dangling pointer.
+    tables.emplace_back();
+    // We must insert an element to get a hashtablez to instantiate.
+    tables.back().insert(elt);
+  }
+  size_t new_count = 0;
+  sampler.Iterate([&](const HashtablezInfo& info) {
+    if (preexisting_info.insert(&info).second) {
+      EXPECT_EQ(info.inline_element_size, expected_element_size);
+      ++new_count;
+    }
+  });
+  // Make sure we actually did get a new hashtablez.
+  EXPECT_GT(new_count, 0);
+}
+
+struct bigstruct {
+  char a[1000];
+  friend bool operator==(const bigstruct& x, const bigstruct& y) {
+    return memcmp(x.a, y.a, sizeof(x.a)) == 0;
+  }
+  template <typename H>
+  friend H AbslHashValue(H h, const bigstruct& c) {
+    return H::combine_contiguous(std::move(h), c.a, sizeof(c.a));
+  }
+};
+#endif
+
+TEST(FlatHashMap, SampleElementSize) {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+  // Enable sampling even if the prod default is off.
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(1);
+
+  auto& sampler = GlobalHashtablezSampler();
+  std::vector<flat_hash_map<int, bigstruct>> flat_map_tables;
+  std::vector<flat_hash_set<bigstruct>> flat_set_tables;
+  std::vector<node_hash_map<int, bigstruct>> node_map_tables;
+  std::vector<node_hash_set<bigstruct>> node_set_tables;
+
+  // It takes thousands of new tables after changing the sampling parameters
+  // before you actually get some instrumentation, and you must actually
+  // put something into those tables.
+  for (int i = 0; i < 10000; ++i) {
+    flat_map_tables.emplace_back();
+    flat_map_tables.back()[i] = bigstruct{};
+  }
+
+  // clang-tidy gives a false positive on this declaration.  This unordered set
+  // cannot be a flat_hash_set, however, since that would introduce a mutex
+  // deadlock.
+  std::unordered_set<const HashtablezInfo*> preexisting_info;  // NOLINT
+  sampler.Iterate(
+      [&](const HashtablezInfo& info) { preexisting_info.insert(&info); });
+  TestInlineElementSize(sampler, preexisting_info, flat_map_tables,
+                        {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct));
+  TestInlineElementSize(sampler, preexisting_info, node_map_tables,
+                        {0, bigstruct{}}, sizeof(void*));
+  TestInlineElementSize(sampler, preexisting_info, flat_set_tables,  //
+                        bigstruct{}, sizeof(bigstruct));
+  TestInlineElementSize(sampler, preexisting_info, node_set_tables,  //
+                        bigstruct{}, sizeof(void*));
+#endif
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
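
The check in this new test boils down to slot sizes: `inline_element_size` is `sizeof(slot_type)`, so flat containers report the full element footprint while node containers report a pointer. A tiny standalone sketch of the arithmetic the test expects (the `bigstruct` layout is copied from the test; the printed values are platform-dependent):

#include <cstdio>

struct bigstruct {
  char a[1000];
};

int main() {
  // flat_hash_map<int, bigstruct>: key and mapped value live inline in the slot.
  std::printf("flat map slot: %zu bytes\n", sizeof(int) + sizeof(bigstruct));
  // flat_hash_set<bigstruct>: the element itself lives inline in the slot.
  std::printf("flat set slot: %zu bytes\n", sizeof(bigstruct));
  // node_hash_map / node_hash_set: each slot only holds a pointer to the node.
  std::printf("node slot:     %zu bytes\n", sizeof(void*));
  return 0;
}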
diff --git a/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake b/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake
index acd46d0..3f737c8 100644
--- a/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake
+++ b/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake
@@ -1,8 +1,6 @@
 # See absl/copts/copts.py and absl/copts/generate_copts.py
 include(GENERATED_AbseilCopts)
 
-set(ABSL_LSAN_LINKOPTS "")
-set(ABSL_HAVE_LSAN OFF)
 set(ABSL_DEFAULT_LINKOPTS "")
 
 if (BUILD_SHARED_LIBS AND MSVC)
@@ -12,51 +10,92 @@
   set(ABSL_BUILD_DLL FALSE)
 endif()
 
-if("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "x86_64|amd64|AMD64")
+if(APPLE AND CMAKE_CXX_COMPILER_ID MATCHES [[Clang]])
+  # Some CMake targets (not known at the moment of processing) could be set to
+  # compile for multiple architectures as specified by the OSX_ARCHITECTURES
+  # property, which is target-specific.  We should neither inspect nor rely on
+  # any CMake property or variable to detect an architecture, in particular:
+  #
+  #   - CMAKE_OSX_ARCHITECTURES
+  #     is just an initial value for OSX_ARCHITECTURES; set too early.
+  #
+  #   - OSX_ARCHITECTURES
+  #     is a per-target property; targets could be defined later, and their
+  #     properties could be modified any time later.
+  #
+  #   - CMAKE_SYSTEM_PROCESSOR
+  #     does not reflect multiple architectures at all.
+  #
+  # When compiling for multiple architectures, a build system can invoke a
+  # compiler either
+  #
+  #   - once: a single command line for multiple architectures (Ninja build)
+  #   - twice: one command line per architecture (Xcode build system)
+  #
+  # In the case of Xcode, it would be possible to set Xcode-specific attributes
+  # like XCODE_ATTRIBUTE_OTHER_CPLUSPLUSFLAGS[arch=arm64] or similar.
+  #
+  # In both cases, the viable strategy is to pass all arguments at once, allowing
+  # the compiler to dispatch arch-specific arguments to a designated backend.
+  set(ABSL_RANDOM_RANDEN_COPTS "")
+  foreach(_arch IN ITEMS "x86_64" "arm64")
+    string(TOUPPER "${_arch}" _arch_uppercase)
+    string(REPLACE "X86_64" "X64" _arch_uppercase ${_arch_uppercase})
+    foreach(_flag IN LISTS ABSL_RANDOM_HWAES_${_arch_uppercase}_FLAGS)
+      list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Xarch_${_arch}" "${_flag}")
+    endforeach()
+  endforeach()
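+  # With the generated HWAES flags this typically expands to something like
+  #   -Xarch_x86_64 -maes -Xarch_x86_64 -msse4.1 -Xarch_arm64 -march=armv8-a+crypto
+  # (illustrative only; the exact flags come from GENERATED_AbseilCopts.cmake).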
+  # If the compiler is handed an argument for an architecture it is not
+  # currently targeting, it will warn about an unused command line argument.
+  option(ABSL_RANDOM_RANDEN_COPTS_WARNING
+         "Warn if one of ABSL_RANDOM_RANDEN_COPTS is unused" OFF)
+  if(ABSL_RANDOM_RANDEN_COPTS AND NOT ABSL_RANDOM_RANDEN_COPTS_WARNING)
+    list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Wno-unused-command-line-argument")
+  endif()
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64")
   if (MSVC)
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_MSVC_X64_FLAGS}")
   else()
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_X64_FLAGS}")
   endif()
-elseif("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "arm.*|aarch64")
-  if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*|aarch64")
+  if (CMAKE_SIZEOF_VOID_P STREQUAL "8")
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_ARM64_FLAGS}")
-  elseif("${CMAKE_SIZEOF_VOID_P}" STREQUAL "4")
+  elseif(CMAKE_SIZEOF_VOID_P STREQUAL "4")
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_ARM32_FLAGS}")
   else()
     message(WARNING "Value of CMAKE_SIZEOF_VOID_P (${CMAKE_SIZEOF_VOID_P}) is not supported.")
   endif()
 else()
-  message(WARNING "Value of CMAKE_SYSTEM_PROCESSOR (${CMAKE_SYSTEM_PROCESSOR}) is unknown and cannot be used to set ABSL_RANDOM_RANDEN_COPTS")
   set(ABSL_RANDOM_RANDEN_COPTS "")
 endif()
 
 
-if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
   set(ABSL_DEFAULT_COPTS "${ABSL_GCC_FLAGS}")
-  set(ABSL_TEST_COPTS "${ABSL_GCC_FLAGS};${ABSL_GCC_TEST_FLAGS}")
-elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
-  # MATCHES so we get both Clang and AppleClang
+  set(ABSL_TEST_COPTS "${ABSL_GCC_TEST_FLAGS}")
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")  # MATCHES so we get both Clang and AppleClang
   if(MSVC)
     # clang-cl is half MSVC, half LLVM
     set(ABSL_DEFAULT_COPTS "${ABSL_CLANG_CL_FLAGS}")
-    set(ABSL_TEST_COPTS "${ABSL_CLANG_CL_FLAGS};${ABSL_CLANG_CL_TEST_FLAGS}")
-    set(ABSL_DEFAULT_LINKOPTS "${ABSL_MSVC_LINKOPTS}")
+    set(ABSL_TEST_COPTS "${ABSL_CLANG_CL_TEST_FLAGS}")
   else()
     set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}")
-    set(ABSL_TEST_COPTS "${ABSL_LLVM_FLAGS};${ABSL_LLVM_TEST_FLAGS}")
-    if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
-      # AppleClang doesn't have lsan
-      # https://developer.apple.com/documentation/code_diagnostics
-      if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)
-        set(ABSL_LSAN_LINKOPTS "-fsanitize=leak")
-        set(ABSL_HAVE_LSAN ON)
-      endif()
-    endif()
+    set(ABSL_TEST_COPTS "${ABSL_LLVM_TEST_FLAGS}")
   endif()
-elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM")
+  # IntelLLVM is similar to Clang, with some additional flags.
+  if(MSVC)
+    # clang-cl is half MSVC, half LLVM
+    set(ABSL_DEFAULT_COPTS "${ABSL_CLANG_CL_FLAGS}")
+    set(ABSL_TEST_COPTS "${ABSL_CLANG_CL_TEST_FLAGS}")
+  else()
+    set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}")
+    set(ABSL_TEST_COPTS "${ABSL_LLVM_TEST_FLAGS}")
+  endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
   set(ABSL_DEFAULT_COPTS "${ABSL_MSVC_FLAGS}")
-  set(ABSL_TEST_COPTS "${ABSL_MSVC_FLAGS};${ABSL_MSVC_TEST_FLAGS}")
+  set(ABSL_TEST_COPTS "${ABSL_MSVC_TEST_FLAGS}")
   set(ABSL_DEFAULT_LINKOPTS "${ABSL_MSVC_LINKOPTS}")
 else()
   message(WARNING "Unknown compiler: ${CMAKE_CXX_COMPILER}.  Building with no default flags")
diff --git a/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake b/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake
index 97bd283..430916f 100644
--- a/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake
+++ b/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake
@@ -5,47 +5,6 @@
 
 list(APPEND ABSL_CLANG_CL_FLAGS
     "/W3"
-    "-Wno-c++98-compat-pedantic"
-    "-Wno-conversion"
-    "-Wno-covered-switch-default"
-    "-Wno-deprecated"
-    "-Wno-disabled-macro-expansion"
-    "-Wno-double-promotion"
-    "-Wno-comma"
-    "-Wno-extra-semi"
-    "-Wno-extra-semi-stmt"
-    "-Wno-packed"
-    "-Wno-padded"
-    "-Wno-sign-compare"
-    "-Wno-float-conversion"
-    "-Wno-float-equal"
-    "-Wno-format-nonliteral"
-    "-Wno-gcc-compat"
-    "-Wno-global-constructors"
-    "-Wno-exit-time-destructors"
-    "-Wno-non-modular-include-in-module"
-    "-Wno-old-style-cast"
-    "-Wno-range-loop-analysis"
-    "-Wno-reserved-id-macro"
-    "-Wno-shorten-64-to-32"
-    "-Wno-switch-enum"
-    "-Wno-thread-safety-negative"
-    "-Wno-unknown-warning-option"
-    "-Wno-unreachable-code"
-    "-Wno-unused-macros"
-    "-Wno-weak-vtables"
-    "-Wno-zero-as-null-pointer-constant"
-    "-Wbitfield-enum-conversion"
-    "-Wbool-conversion"
-    "-Wconstant-conversion"
-    "-Wenum-conversion"
-    "-Wint-conversion"
-    "-Wliteral-conversion"
-    "-Wnon-literal-null-conversion"
-    "-Wnull-conversion"
-    "-Wobjc-literal-conversion"
-    "-Wno-sign-conversion"
-    "-Wstring-conversion"
     "/DNOMINMAX"
     "/DWIN32_LEAN_AND_MEAN"
     "/D_CRT_SECURE_NO_WARNINGS"
@@ -54,22 +13,27 @@
 )
 
 list(APPEND ABSL_CLANG_CL_TEST_FLAGS
-    "-Wno-c99-extensions"
+    "/W3"
+    "/DNOMINMAX"
+    "/DWIN32_LEAN_AND_MEAN"
+    "/D_CRT_SECURE_NO_WARNINGS"
+    "/D_SCL_SECURE_NO_WARNINGS"
+    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE"
     "-Wno-deprecated-declarations"
-    "-Wno-missing-noreturn"
+    "-Wno-implicit-int-conversion"
     "-Wno-missing-prototypes"
     "-Wno-missing-variable-declarations"
-    "-Wno-null-conversion"
     "-Wno-shadow"
-    "-Wno-shift-sign-overflow"
+    "-Wno-shorten-64-to-32"
     "-Wno-sign-compare"
+    "-Wno-sign-conversion"
+    "-Wno-unreachable-code-loop-increment"
     "-Wno-unused-function"
     "-Wno-unused-member-function"
     "-Wno-unused-parameter"
     "-Wno-unused-private-field"
     "-Wno-unused-template"
     "-Wno-used-but-marked-unused"
-    "-Wno-zero-as-null-pointer-constant"
     "-Wno-gnu-zero-variadic-macro-arguments"
 )
 
@@ -78,6 +42,7 @@
     "-Wextra"
     "-Wcast-qual"
     "-Wconversion-null"
+    "-Wformat-security"
     "-Wmissing-declarations"
     "-Woverlength-strings"
     "-Wpointer-arith"
@@ -87,15 +52,27 @@
     "-Wvarargs"
     "-Wvla"
     "-Wwrite-strings"
-    "-Wno-missing-field-initializers"
-    "-Wno-sign-compare"
     "-DNOMINMAX"
 )
 
 list(APPEND ABSL_GCC_TEST_FLAGS
-    "-Wno-conversion-null"
+    "-Wall"
+    "-Wextra"
+    "-Wcast-qual"
+    "-Wconversion-null"
+    "-Wformat-security"
+    "-Woverlength-strings"
+    "-Wpointer-arith"
+    "-Wundef"
+    "-Wunused-local-typedefs"
+    "-Wunused-result"
+    "-Wvarargs"
+    "-Wvla"
+    "-Wwrite-strings"
+    "-DNOMINMAX"
     "-Wno-deprecated-declarations"
     "-Wno-missing-declarations"
+    "-Wno-self-move"
     "-Wno-sign-compare"
     "-Wno-unused-function"
     "-Wno-unused-parameter"
@@ -105,78 +82,95 @@
 list(APPEND ABSL_LLVM_FLAGS
     "-Wall"
     "-Wextra"
-    "-Weverything"
-    "-Wno-c++98-compat-pedantic"
-    "-Wno-conversion"
-    "-Wno-covered-switch-default"
-    "-Wno-deprecated"
-    "-Wno-disabled-macro-expansion"
-    "-Wno-double-promotion"
-    "-Wno-comma"
-    "-Wno-extra-semi"
-    "-Wno-extra-semi-stmt"
-    "-Wno-packed"
-    "-Wno-padded"
-    "-Wno-sign-compare"
-    "-Wno-float-conversion"
-    "-Wno-float-equal"
-    "-Wno-format-nonliteral"
-    "-Wno-gcc-compat"
-    "-Wno-global-constructors"
-    "-Wno-exit-time-destructors"
-    "-Wno-non-modular-include-in-module"
-    "-Wno-old-style-cast"
-    "-Wno-range-loop-analysis"
-    "-Wno-reserved-id-macro"
-    "-Wno-shorten-64-to-32"
-    "-Wno-switch-enum"
-    "-Wno-thread-safety-negative"
-    "-Wno-unknown-warning-option"
-    "-Wno-unreachable-code"
-    "-Wno-unused-macros"
-    "-Wno-weak-vtables"
-    "-Wno-zero-as-null-pointer-constant"
-    "-Wbitfield-enum-conversion"
-    "-Wbool-conversion"
-    "-Wconstant-conversion"
-    "-Wenum-conversion"
-    "-Wint-conversion"
+    "-Wcast-qual"
+    "-Wconversion"
+    "-Wfloat-overflow-conversion"
+    "-Wfloat-zero-conversion"
+    "-Wfor-loop-analysis"
+    "-Wformat-security"
+    "-Wgnu-redeclared-enum"
+    "-Winfinite-recursion"
+    "-Winvalid-constexpr"
     "-Wliteral-conversion"
-    "-Wnon-literal-null-conversion"
-    "-Wnull-conversion"
-    "-Wobjc-literal-conversion"
-    "-Wno-sign-conversion"
+    "-Wmissing-declarations"
+    "-Woverlength-strings"
+    "-Wpointer-arith"
+    "-Wself-assign"
+    "-Wshadow-all"
+    "-Wshorten-64-to-32"
+    "-Wsign-conversion"
     "-Wstring-conversion"
+    "-Wtautological-overlap-compare"
+    "-Wtautological-unsigned-zero-compare"
+    "-Wundef"
+    "-Wuninitialized"
+    "-Wunreachable-code"
+    "-Wunused-comparison"
+    "-Wunused-local-typedefs"
+    "-Wunused-result"
+    "-Wvla"
+    "-Wwrite-strings"
+    "-Wno-float-conversion"
+    "-Wno-implicit-float-conversion"
+    "-Wno-implicit-int-float-conversion"
+    "-Wno-unknown-warning-option"
     "-DNOMINMAX"
 )
 
 list(APPEND ABSL_LLVM_TEST_FLAGS
-    "-Wno-c99-extensions"
+    "-Wall"
+    "-Wextra"
+    "-Wcast-qual"
+    "-Wconversion"
+    "-Wfloat-overflow-conversion"
+    "-Wfloat-zero-conversion"
+    "-Wfor-loop-analysis"
+    "-Wformat-security"
+    "-Wgnu-redeclared-enum"
+    "-Winfinite-recursion"
+    "-Winvalid-constexpr"
+    "-Wliteral-conversion"
+    "-Wmissing-declarations"
+    "-Woverlength-strings"
+    "-Wpointer-arith"
+    "-Wself-assign"
+    "-Wshadow-all"
+    "-Wstring-conversion"
+    "-Wtautological-overlap-compare"
+    "-Wtautological-unsigned-zero-compare"
+    "-Wundef"
+    "-Wuninitialized"
+    "-Wunreachable-code"
+    "-Wunused-comparison"
+    "-Wunused-local-typedefs"
+    "-Wunused-result"
+    "-Wvla"
+    "-Wwrite-strings"
+    "-Wno-float-conversion"
+    "-Wno-implicit-float-conversion"
+    "-Wno-implicit-int-float-conversion"
+    "-Wno-unknown-warning-option"
+    "-DNOMINMAX"
     "-Wno-deprecated-declarations"
-    "-Wno-missing-noreturn"
+    "-Wno-implicit-int-conversion"
     "-Wno-missing-prototypes"
     "-Wno-missing-variable-declarations"
-    "-Wno-null-conversion"
     "-Wno-shadow"
-    "-Wno-shift-sign-overflow"
+    "-Wno-shorten-64-to-32"
     "-Wno-sign-compare"
+    "-Wno-sign-conversion"
+    "-Wno-unreachable-code-loop-increment"
     "-Wno-unused-function"
     "-Wno-unused-member-function"
     "-Wno-unused-parameter"
     "-Wno-unused-private-field"
     "-Wno-unused-template"
     "-Wno-used-but-marked-unused"
-    "-Wno-zero-as-null-pointer-constant"
     "-Wno-gnu-zero-variadic-macro-arguments"
 )
 
 list(APPEND ABSL_MSVC_FLAGS
     "/W3"
-    "/DNOMINMAX"
-    "/DWIN32_LEAN_AND_MEAN"
-    "/D_CRT_SECURE_NO_WARNINGS"
-    "/D_SCL_SECURE_NO_WARNINGS"
-    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE"
     "/bigobj"
     "/wd4005"
     "/wd4068"
@@ -185,6 +179,11 @@
     "/wd4267"
     "/wd4503"
     "/wd4800"
+    "/DNOMINMAX"
+    "/DWIN32_LEAN_AND_MEAN"
+    "/D_CRT_SECURE_NO_WARNINGS"
+    "/D_SCL_SECURE_NO_WARNINGS"
+    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE"
 )
 
 list(APPEND ABSL_MSVC_LINKOPTS
@@ -192,6 +191,20 @@
 )
 
 list(APPEND ABSL_MSVC_TEST_FLAGS
+    "/W3"
+    "/bigobj"
+    "/wd4005"
+    "/wd4068"
+    "/wd4180"
+    "/wd4244"
+    "/wd4267"
+    "/wd4503"
+    "/wd4800"
+    "/DNOMINMAX"
+    "/DWIN32_LEAN_AND_MEAN"
+    "/D_CRT_SECURE_NO_WARNINGS"
+    "/D_SCL_SECURE_NO_WARNINGS"
+    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE"
     "/wd4018"
     "/wd4101"
     "/wd4503"
diff --git a/abseil-cpp/absl/copts/GENERATED_copts.bzl b/abseil-cpp/absl/copts/GENERATED_copts.bzl
index bcdd61e..011d8a9 100644
--- a/abseil-cpp/absl/copts/GENERATED_copts.bzl
+++ b/abseil-cpp/absl/copts/GENERATED_copts.bzl
@@ -6,47 +6,6 @@
 
 ABSL_CLANG_CL_FLAGS = [
     "/W3",
-    "-Wno-c++98-compat-pedantic",
-    "-Wno-conversion",
-    "-Wno-covered-switch-default",
-    "-Wno-deprecated",
-    "-Wno-disabled-macro-expansion",
-    "-Wno-double-promotion",
-    "-Wno-comma",
-    "-Wno-extra-semi",
-    "-Wno-extra-semi-stmt",
-    "-Wno-packed",
-    "-Wno-padded",
-    "-Wno-sign-compare",
-    "-Wno-float-conversion",
-    "-Wno-float-equal",
-    "-Wno-format-nonliteral",
-    "-Wno-gcc-compat",
-    "-Wno-global-constructors",
-    "-Wno-exit-time-destructors",
-    "-Wno-non-modular-include-in-module",
-    "-Wno-old-style-cast",
-    "-Wno-range-loop-analysis",
-    "-Wno-reserved-id-macro",
-    "-Wno-shorten-64-to-32",
-    "-Wno-switch-enum",
-    "-Wno-thread-safety-negative",
-    "-Wno-unknown-warning-option",
-    "-Wno-unreachable-code",
-    "-Wno-unused-macros",
-    "-Wno-weak-vtables",
-    "-Wno-zero-as-null-pointer-constant",
-    "-Wbitfield-enum-conversion",
-    "-Wbool-conversion",
-    "-Wconstant-conversion",
-    "-Wenum-conversion",
-    "-Wint-conversion",
-    "-Wliteral-conversion",
-    "-Wnon-literal-null-conversion",
-    "-Wnull-conversion",
-    "-Wobjc-literal-conversion",
-    "-Wno-sign-conversion",
-    "-Wstring-conversion",
     "/DNOMINMAX",
     "/DWIN32_LEAN_AND_MEAN",
     "/D_CRT_SECURE_NO_WARNINGS",
@@ -55,22 +14,27 @@
 ]
 
 ABSL_CLANG_CL_TEST_FLAGS = [
-    "-Wno-c99-extensions",
+    "/W3",
+    "/DNOMINMAX",
+    "/DWIN32_LEAN_AND_MEAN",
+    "/D_CRT_SECURE_NO_WARNINGS",
+    "/D_SCL_SECURE_NO_WARNINGS",
+    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
     "-Wno-deprecated-declarations",
-    "-Wno-missing-noreturn",
+    "-Wno-implicit-int-conversion",
     "-Wno-missing-prototypes",
     "-Wno-missing-variable-declarations",
-    "-Wno-null-conversion",
     "-Wno-shadow",
-    "-Wno-shift-sign-overflow",
+    "-Wno-shorten-64-to-32",
     "-Wno-sign-compare",
+    "-Wno-sign-conversion",
+    "-Wno-unreachable-code-loop-increment",
     "-Wno-unused-function",
     "-Wno-unused-member-function",
     "-Wno-unused-parameter",
     "-Wno-unused-private-field",
     "-Wno-unused-template",
     "-Wno-used-but-marked-unused",
-    "-Wno-zero-as-null-pointer-constant",
     "-Wno-gnu-zero-variadic-macro-arguments",
 ]
 
@@ -79,6 +43,7 @@
     "-Wextra",
     "-Wcast-qual",
     "-Wconversion-null",
+    "-Wformat-security",
     "-Wmissing-declarations",
     "-Woverlength-strings",
     "-Wpointer-arith",
@@ -88,15 +53,27 @@
     "-Wvarargs",
     "-Wvla",
     "-Wwrite-strings",
-    "-Wno-missing-field-initializers",
-    "-Wno-sign-compare",
     "-DNOMINMAX",
 ]
 
 ABSL_GCC_TEST_FLAGS = [
-    "-Wno-conversion-null",
+    "-Wall",
+    "-Wextra",
+    "-Wcast-qual",
+    "-Wconversion-null",
+    "-Wformat-security",
+    "-Woverlength-strings",
+    "-Wpointer-arith",
+    "-Wundef",
+    "-Wunused-local-typedefs",
+    "-Wunused-result",
+    "-Wvarargs",
+    "-Wvla",
+    "-Wwrite-strings",
+    "-DNOMINMAX",
     "-Wno-deprecated-declarations",
     "-Wno-missing-declarations",
+    "-Wno-self-move",
     "-Wno-sign-compare",
     "-Wno-unused-function",
     "-Wno-unused-parameter",
@@ -106,78 +83,95 @@
 ABSL_LLVM_FLAGS = [
     "-Wall",
     "-Wextra",
-    "-Weverything",
-    "-Wno-c++98-compat-pedantic",
-    "-Wno-conversion",
-    "-Wno-covered-switch-default",
-    "-Wno-deprecated",
-    "-Wno-disabled-macro-expansion",
-    "-Wno-double-promotion",
-    "-Wno-comma",
-    "-Wno-extra-semi",
-    "-Wno-extra-semi-stmt",
-    "-Wno-packed",
-    "-Wno-padded",
-    "-Wno-sign-compare",
-    "-Wno-float-conversion",
-    "-Wno-float-equal",
-    "-Wno-format-nonliteral",
-    "-Wno-gcc-compat",
-    "-Wno-global-constructors",
-    "-Wno-exit-time-destructors",
-    "-Wno-non-modular-include-in-module",
-    "-Wno-old-style-cast",
-    "-Wno-range-loop-analysis",
-    "-Wno-reserved-id-macro",
-    "-Wno-shorten-64-to-32",
-    "-Wno-switch-enum",
-    "-Wno-thread-safety-negative",
-    "-Wno-unknown-warning-option",
-    "-Wno-unreachable-code",
-    "-Wno-unused-macros",
-    "-Wno-weak-vtables",
-    "-Wno-zero-as-null-pointer-constant",
-    "-Wbitfield-enum-conversion",
-    "-Wbool-conversion",
-    "-Wconstant-conversion",
-    "-Wenum-conversion",
-    "-Wint-conversion",
+    "-Wcast-qual",
+    "-Wconversion",
+    "-Wfloat-overflow-conversion",
+    "-Wfloat-zero-conversion",
+    "-Wfor-loop-analysis",
+    "-Wformat-security",
+    "-Wgnu-redeclared-enum",
+    "-Winfinite-recursion",
+    "-Winvalid-constexpr",
     "-Wliteral-conversion",
-    "-Wnon-literal-null-conversion",
-    "-Wnull-conversion",
-    "-Wobjc-literal-conversion",
-    "-Wno-sign-conversion",
+    "-Wmissing-declarations",
+    "-Woverlength-strings",
+    "-Wpointer-arith",
+    "-Wself-assign",
+    "-Wshadow-all",
+    "-Wshorten-64-to-32",
+    "-Wsign-conversion",
     "-Wstring-conversion",
+    "-Wtautological-overlap-compare",
+    "-Wtautological-unsigned-zero-compare",
+    "-Wundef",
+    "-Wuninitialized",
+    "-Wunreachable-code",
+    "-Wunused-comparison",
+    "-Wunused-local-typedefs",
+    "-Wunused-result",
+    "-Wvla",
+    "-Wwrite-strings",
+    "-Wno-float-conversion",
+    "-Wno-implicit-float-conversion",
+    "-Wno-implicit-int-float-conversion",
+    "-Wno-unknown-warning-option",
     "-DNOMINMAX",
 ]
 
 ABSL_LLVM_TEST_FLAGS = [
-    "-Wno-c99-extensions",
+    "-Wall",
+    "-Wextra",
+    "-Wcast-qual",
+    "-Wconversion",
+    "-Wfloat-overflow-conversion",
+    "-Wfloat-zero-conversion",
+    "-Wfor-loop-analysis",
+    "-Wformat-security",
+    "-Wgnu-redeclared-enum",
+    "-Winfinite-recursion",
+    "-Winvalid-constexpr",
+    "-Wliteral-conversion",
+    "-Wmissing-declarations",
+    "-Woverlength-strings",
+    "-Wpointer-arith",
+    "-Wself-assign",
+    "-Wshadow-all",
+    "-Wstring-conversion",
+    "-Wtautological-overlap-compare",
+    "-Wtautological-unsigned-zero-compare",
+    "-Wundef",
+    "-Wuninitialized",
+    "-Wunreachable-code",
+    "-Wunused-comparison",
+    "-Wunused-local-typedefs",
+    "-Wunused-result",
+    "-Wvla",
+    "-Wwrite-strings",
+    "-Wno-float-conversion",
+    "-Wno-implicit-float-conversion",
+    "-Wno-implicit-int-float-conversion",
+    "-Wno-unknown-warning-option",
+    "-DNOMINMAX",
     "-Wno-deprecated-declarations",
-    "-Wno-missing-noreturn",
+    "-Wno-implicit-int-conversion",
     "-Wno-missing-prototypes",
     "-Wno-missing-variable-declarations",
-    "-Wno-null-conversion",
     "-Wno-shadow",
-    "-Wno-shift-sign-overflow",
+    "-Wno-shorten-64-to-32",
     "-Wno-sign-compare",
+    "-Wno-sign-conversion",
+    "-Wno-unreachable-code-loop-increment",
     "-Wno-unused-function",
     "-Wno-unused-member-function",
     "-Wno-unused-parameter",
     "-Wno-unused-private-field",
     "-Wno-unused-template",
     "-Wno-used-but-marked-unused",
-    "-Wno-zero-as-null-pointer-constant",
     "-Wno-gnu-zero-variadic-macro-arguments",
 ]
 
 ABSL_MSVC_FLAGS = [
     "/W3",
-    "/DNOMINMAX",
-    "/DWIN32_LEAN_AND_MEAN",
-    "/D_CRT_SECURE_NO_WARNINGS",
-    "/D_SCL_SECURE_NO_WARNINGS",
-    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
     "/bigobj",
     "/wd4005",
     "/wd4068",
@@ -186,6 +180,11 @@
     "/wd4267",
     "/wd4503",
     "/wd4800",
+    "/DNOMINMAX",
+    "/DWIN32_LEAN_AND_MEAN",
+    "/D_CRT_SECURE_NO_WARNINGS",
+    "/D_SCL_SECURE_NO_WARNINGS",
+    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
 ]
 
 ABSL_MSVC_LINKOPTS = [
@@ -193,6 +192,20 @@
 ]
 
 ABSL_MSVC_TEST_FLAGS = [
+    "/W3",
+    "/bigobj",
+    "/wd4005",
+    "/wd4068",
+    "/wd4180",
+    "/wd4244",
+    "/wd4267",
+    "/wd4503",
+    "/wd4800",
+    "/DNOMINMAX",
+    "/DWIN32_LEAN_AND_MEAN",
+    "/D_CRT_SECURE_NO_WARNINGS",
+    "/D_SCL_SECURE_NO_WARNINGS",
+    "/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
     "/wd4018",
     "/wd4101",
     "/wd4503",
diff --git a/abseil-cpp/absl/copts/configure_copts.bzl b/abseil-cpp/absl/copts/configure_copts.bzl
index ff9a5ea..ca5f26d 100644
--- a/abseil-cpp/absl/copts/configure_copts.bzl
+++ b/abseil-cpp/absl/copts/configure_copts.bzl
@@ -22,21 +22,23 @@
 )
 
 ABSL_DEFAULT_COPTS = select({
-    "//absl:windows": ABSL_MSVC_FLAGS,
-    "//absl:llvm_compiler": ABSL_LLVM_FLAGS,
+    "//absl:msvc_compiler": ABSL_MSVC_FLAGS,
+    "//absl:clang-cl_compiler": ABSL_CLANG_CL_FLAGS,
+    "//absl:clang_compiler": ABSL_LLVM_FLAGS,
+    "//absl:gcc_compiler": ABSL_GCC_FLAGS,
     "//conditions:default": ABSL_GCC_FLAGS,
 })
 
-# in absence of modules (--compiler=gcc or -c opt), cc_tests leak their copts
-# to their (included header) dependencies and fail to build outside absl
-ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({
-    "//absl:windows": ABSL_MSVC_TEST_FLAGS,
-    "//absl:llvm_compiler": ABSL_LLVM_TEST_FLAGS,
+ABSL_TEST_COPTS = select({
+    "//absl:msvc_compiler": ABSL_MSVC_TEST_FLAGS,
+    "//absl:clang-cl_compiler": ABSL_CLANG_CL_TEST_FLAGS,
+    "//absl:clang_compiler": ABSL_LLVM_TEST_FLAGS,
+    "//absl:gcc_compiler": ABSL_GCC_TEST_FLAGS,
     "//conditions:default": ABSL_GCC_TEST_FLAGS,
 })
 
 ABSL_DEFAULT_LINKOPTS = select({
-    "//absl:windows": ABSL_MSVC_LINKOPTS,
+    "//absl:msvc_compiler": ABSL_MSVC_LINKOPTS,
     "//conditions:default": [],
 })
 
@@ -50,6 +52,7 @@
     ":cpu_x64_windows": ABSL_RANDOM_HWAES_MSVC_X64_FLAGS,
     ":cpu_k8": ABSL_RANDOM_HWAES_X64_FLAGS,
     ":cpu_ppc": ["-mcrypto"],
+    ":cpu_aarch64": ABSL_RANDOM_HWAES_ARM64_FLAGS,
 
     # Supported by default or unsupported.
     "//conditions:default": [],
@@ -70,6 +73,7 @@
         "darwin",
         "x64_windows_msvc",
         "x64_windows",
+        "aarch64",
     ]
     for cpu in cpu_configs:
         native.config_setting(
diff --git a/abseil-cpp/absl/copts/copts.py b/abseil-cpp/absl/copts/copts.py
index a3437c1..e6e1194 100644
--- a/abseil-cpp/absl/copts/copts.py
+++ b/abseil-cpp/absl/copts/copts.py
@@ -11,103 +11,120 @@
 AbseilConfigureCopts.cmake.
 """
 
-# /Wall with msvc includes unhelpful warnings such as C4711, C4710, ...
-MSVC_BIG_WARNING_FLAGS = [
-    "/W3",
-]
-
-LLVM_BIG_WARNING_FLAGS = [
+ABSL_GCC_FLAGS = [
     "-Wall",
     "-Wextra",
-    "-Weverything",
+    "-Wcast-qual",
+    "-Wconversion-null",
+    "-Wformat-security",
+    "-Wmissing-declarations",
+    "-Woverlength-strings",
+    "-Wpointer-arith",
+    "-Wundef",
+    "-Wunused-local-typedefs",
+    "-Wunused-result",
+    "-Wvarargs",
+    "-Wvla",  # variable-length array
+    "-Wwrite-strings",
+    # Don't define min and max macros (Build on Windows using gcc)
+    "-DNOMINMAX",
 ]
 
-# Docs on single flags is preceded by a comment.
-# Docs on groups of flags is preceded by ###.
-LLVM_DISABLE_WARNINGS_FLAGS = [
-    # Abseil does not support C++98
-    "-Wno-c++98-compat-pedantic",
-    # Turns off all implicit conversion warnings. Most are re-enabled below.
-    "-Wno-conversion",
-    "-Wno-covered-switch-default",
-    "-Wno-deprecated",
-    "-Wno-disabled-macro-expansion",
-    "-Wno-double-promotion",
-    ###
-    # Turned off as they include valid C++ code.
-    "-Wno-comma",
-    "-Wno-extra-semi",
-    "-Wno-extra-semi-stmt",
-    "-Wno-packed",
-    "-Wno-padded",
-    ###
-    # Google style does not use unsigned integers, though STL containers
-    # have unsigned types.
-    "-Wno-sign-compare",
-    ###
-    "-Wno-float-conversion",
-    "-Wno-float-equal",
-    "-Wno-format-nonliteral",
-    # Too aggressive: warns on Clang extensions enclosed in Clang-only
-    # compilation paths.
-    "-Wno-gcc-compat",
-    ###
-    # Some internal globals are necessary. Don't do this at home.
-    "-Wno-global-constructors",
-    "-Wno-exit-time-destructors",
-    ###
-    "-Wno-non-modular-include-in-module",
-    "-Wno-old-style-cast",
-    # Warns on preferred usage of non-POD types such as string_view
-    "-Wno-range-loop-analysis",
-    "-Wno-reserved-id-macro",
-    "-Wno-shorten-64-to-32",
-    "-Wno-switch-enum",
-    "-Wno-thread-safety-negative",
-    "-Wno-unknown-warning-option",
-    "-Wno-unreachable-code",
-    # Causes warnings on include guards
-    "-Wno-unused-macros",
-    "-Wno-weak-vtables",
-    # Causes warnings on usage of types/compare.h comparison operators.
-    "-Wno-zero-as-null-pointer-constant",
-    ###
-    # Implicit conversion warnings turned off by -Wno-conversion
-    # which are re-enabled below.
-    "-Wbitfield-enum-conversion",
-    "-Wbool-conversion",
-    "-Wconstant-conversion",
-    "-Wenum-conversion",
-    "-Wint-conversion",
-    "-Wliteral-conversion",
-    "-Wnon-literal-null-conversion",
-    "-Wnull-conversion",
-    "-Wobjc-literal-conversion",
-    "-Wno-sign-conversion",
-    "-Wstring-conversion",
-]
-
-LLVM_TEST_DISABLE_WARNINGS_FLAGS = [
-    "-Wno-c99-extensions",
+ABSL_GCC_TEST_ADDITIONAL_FLAGS = [
     "-Wno-deprecated-declarations",
-    "-Wno-missing-noreturn",
+    "-Wno-missing-declarations",
+    "-Wno-self-move",
+    "-Wno-sign-compare",
+    "-Wno-unused-function",
+    "-Wno-unused-parameter",
+    "-Wno-unused-private-field",
+]
+
+ABSL_LLVM_FLAGS = [
+    "-Wall",
+    "-Wextra",
+    "-Wcast-qual",
+    "-Wconversion",
+    "-Wfloat-overflow-conversion",
+    "-Wfloat-zero-conversion",
+    "-Wfor-loop-analysis",
+    "-Wformat-security",
+    "-Wgnu-redeclared-enum",
+    "-Winfinite-recursion",
+    "-Winvalid-constexpr",
+    "-Wliteral-conversion",
+    "-Wmissing-declarations",
+    "-Woverlength-strings",
+    "-Wpointer-arith",
+    "-Wself-assign",
+    "-Wshadow-all",
+    "-Wshorten-64-to-32",
+    "-Wsign-conversion",
+    "-Wstring-conversion",
+    "-Wtautological-overlap-compare",
+    "-Wtautological-unsigned-zero-compare",
+    "-Wundef",
+    "-Wuninitialized",
+    "-Wunreachable-code",
+    "-Wunused-comparison",
+    "-Wunused-local-typedefs",
+    "-Wunused-result",
+    "-Wvla",
+    "-Wwrite-strings",
+    # Warnings that are enabled by group warning flags like -Wall that we
+    # explicitly disable.
+    "-Wno-float-conversion",
+    "-Wno-implicit-float-conversion",
+    "-Wno-implicit-int-float-conversion",
+    # Disable warnings on unknown warning flags (when warning flags are
+    # unknown on older compiler versions)
+    "-Wno-unknown-warning-option",
+    # Don't define min and max macros (Build on Windows using clang)
+    "-DNOMINMAX",
+]
+
+ABSL_LLVM_TEST_ADDITIONAL_FLAGS = [
+    "-Wno-deprecated-declarations",
+    "-Wno-implicit-int-conversion",
     "-Wno-missing-prototypes",
     "-Wno-missing-variable-declarations",
-    "-Wno-null-conversion",
     "-Wno-shadow",
-    "-Wno-shift-sign-overflow",
+    "-Wno-shorten-64-to-32",
     "-Wno-sign-compare",
+    "-Wno-sign-conversion",
+    "-Wno-unreachable-code-loop-increment",
     "-Wno-unused-function",
     "-Wno-unused-member-function",
     "-Wno-unused-parameter",
     "-Wno-unused-private-field",
     "-Wno-unused-template",
     "-Wno-used-but-marked-unused",
-    "-Wno-zero-as-null-pointer-constant",
     # gtest depends on this GNU extension being offered.
     "-Wno-gnu-zero-variadic-macro-arguments",
 ]
 
+# /Wall with msvc includes unhelpful warnings such as C4711, C4710, ...
+MSVC_BIG_WARNING_FLAGS = [
+    "/W3",
+]
+
+MSVC_WARNING_FLAGS = [
+    # Increase the number of sections available in object files
+    "/bigobj",
+    "/wd4005",  # macro-redefinition
+    "/wd4068",  # unknown pragma
+    # qualifier applied to function type has no meaning; ignored
+    "/wd4180",
+    # conversion from 'type1' to 'type2', possible loss of data
+    "/wd4244",
+    # conversion from 'size_t' to 'type', possible loss of data
+    "/wd4267",
+    # The decorated name was longer than the compiler limit
+    "/wd4503",
+    # forcing value to bool 'true' or 'false' (performance warning)
+    "/wd4800",
+]
+
 MSVC_DEFINES = [
     "/DNOMINMAX",  # Don't define min and max macros (windows.h)
     # Don't bloat namespace with incompatible winsock versions.
@@ -119,75 +136,43 @@
     "/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
 ]
 
+
+def GccStyleFilterAndCombine(default_flags, test_flags):
+  """Merges default_flags and test_flags for GCC and LLVM.
+
+  Args:
+    default_flags: A list of default compiler flags
+    test_flags: A list of flags that are only used in tests
+
+  Returns:
+    A combined list of default_flags and test_flags, but with all flags of the
+    form '-Wwarning' removed if test_flags contains a flag of the form
+    '-Wno-warning'
+  """
+  remove = set(["-W" + f[5:] for f in test_flags if f[:5] == "-Wno-"])
+  return [f for f in default_flags if f not in remove] + test_flags
+
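+# A quick illustration with hypothetical flags (not taken from the lists
+# below): GccStyleFilterAndCombine(["-Wall", "-Wfoo"], ["-Wno-foo", "-Wbar"])
+# returns ["-Wall", "-Wno-foo", "-Wbar"] -- "-Wfoo" is dropped because the
+# test flags suppress it, and the test-only flags are then appended.
+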
 COPT_VARS = {
-    "ABSL_GCC_FLAGS": [
-        "-Wall",
-        "-Wextra",
-        "-Wcast-qual",
-        "-Wconversion-null",
-        "-Wmissing-declarations",
-        "-Woverlength-strings",
-        "-Wpointer-arith",
-        "-Wundef",
-        "-Wunused-local-typedefs",
-        "-Wunused-result",
-        "-Wvarargs",
-        "-Wvla",  # variable-length array
-        "-Wwrite-strings",
-        # gcc-4.x has spurious missing field initializer warnings.
-        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36750
-        # Remove when gcc-4.x is no longer supported.
-        "-Wno-missing-field-initializers",
-        # Google style does not use unsigned integers, though STL containers
-        # have unsigned types.
-        "-Wno-sign-compare",
-        # Don't define min and max macros (Build on Windows using gcc)
-        "-DNOMINMAX",
-    ],
-    "ABSL_GCC_TEST_FLAGS": [
-        "-Wno-conversion-null",
-        "-Wno-deprecated-declarations",
-        "-Wno-missing-declarations",
-        "-Wno-sign-compare",
-        "-Wno-unused-function",
-        "-Wno-unused-parameter",
-        "-Wno-unused-private-field",
-    ],
-    "ABSL_LLVM_FLAGS":
-        LLVM_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + [
-            # Don't define min and max macros (Build on Windows using clang)
-            "-DNOMINMAX",
-        ],
-    "ABSL_LLVM_TEST_FLAGS":
-        LLVM_TEST_DISABLE_WARNINGS_FLAGS,
+    "ABSL_GCC_FLAGS": ABSL_GCC_FLAGS,
+    "ABSL_GCC_TEST_FLAGS": GccStyleFilterAndCombine(
+        ABSL_GCC_FLAGS, ABSL_GCC_TEST_ADDITIONAL_FLAGS),
+    "ABSL_LLVM_FLAGS": ABSL_LLVM_FLAGS,
+    "ABSL_LLVM_TEST_FLAGS": GccStyleFilterAndCombine(
+        ABSL_LLVM_FLAGS, ABSL_LLVM_TEST_ADDITIONAL_FLAGS),
     "ABSL_CLANG_CL_FLAGS":
-        (MSVC_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + MSVC_DEFINES),
+        MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES,
     "ABSL_CLANG_CL_TEST_FLAGS":
-        LLVM_TEST_DISABLE_WARNINGS_FLAGS,
+        MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES + ABSL_LLVM_TEST_ADDITIONAL_FLAGS,
     "ABSL_MSVC_FLAGS":
-        MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES + [
-            # Increase the number of sections available in object files
-            "/bigobj",
-            "/wd4005",  # macro-redefinition
-            "/wd4068",  # unknown pragma
-            # qualifier applied to function type has no meaning; ignored
-            "/wd4180",
-            # conversion from 'type1' to 'type2', possible loss of data
-            "/wd4244",
-            # conversion from 'size_t' to 'type', possible loss of data
-            "/wd4267",
-            # The decorated name was longer than the compiler limit
-            "/wd4503",
-            # forcing value to bool 'true' or 'false' (performance warning)
-            "/wd4800",
+        MSVC_BIG_WARNING_FLAGS + MSVC_WARNING_FLAGS + MSVC_DEFINES,
+    "ABSL_MSVC_TEST_FLAGS":
+        MSVC_BIG_WARNING_FLAGS + MSVC_WARNING_FLAGS + MSVC_DEFINES + [
+            "/wd4018",  # signed/unsigned mismatch
+            "/wd4101",  # unreferenced local variable
+            "/wd4503",  # decorated name length exceeded, name was truncated
+            "/wd4996",  # use of deprecated symbol
+            "/DNOMINMAX",  # disable the min() and max() macros from <windows.h>
         ],
-    "ABSL_MSVC_TEST_FLAGS": [
-        "/wd4018",  # signed/unsigned mismatch
-        "/wd4101",  # unreferenced local variable
-        "/wd4503",  # decorated name length exceeded, name was truncated
-        "/wd4996",  # use of deprecated symbol
-        "/DNOMINMAX",  # disable the min() and max() macros from <windows.h>
-    ],
     "ABSL_MSVC_LINKOPTS": [
         # Object file doesn't export any previously undefined symbols
         "-ignore:4221",
diff --git a/abseil-cpp/absl/copts/generate_copts.py b/abseil-cpp/absl/copts/generate_copts.py
index 0e5dc9f..34be2fc 100755
--- a/abseil-cpp/absl/copts/generate_copts.py
+++ b/abseil-cpp/absl/copts/generate_copts.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 """Generate Abseil compile compile option configs.
 
 Usage: <path_to_absl>/copts/generate_copts.py
diff --git a/abseil-cpp/absl/crc/BUILD.bazel b/abseil-cpp/absl/crc/BUILD.bazel
new file mode 100644
index 0000000..cdbaa9b
--- /dev/null
+++ b/abseil-cpp/absl/crc/BUILD.bazel
@@ -0,0 +1,212 @@
+# Copyright 2022 The Abseil Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = ["//visibility:private"])
+
+licenses(["notice"])
+
+cc_library(
+    name = "cpu_detect",
+    srcs = [
+        "internal/cpu_detect.cc",
+    ],
+    hdrs = ["internal/cpu_detect.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        "//absl/base",
+        "//absl/base:config",
+    ],
+)
+
+cc_library(
+    name = "crc_internal",
+    srcs = [
+        "internal/crc.cc",
+        "internal/crc_internal.h",
+        "internal/crc_x86_arm_combined.cc",
+    ],
+    hdrs = [
+        "internal/crc.h",
+        "internal/crc32_x86_arm_combined_simd.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cpu_detect",
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
+        "//absl/base:endian",
+        "//absl/base:prefetch",
+        "//absl/base:raw_logging_internal",
+        "//absl/memory",
+        "//absl/numeric:bits",
+    ],
+)
+
+cc_library(
+    name = "crc32c",
+    srcs = [
+        "crc32c.cc",
+        "internal/crc32c_inline.h",
+        "internal/crc_memcpy_fallback.cc",
+        "internal/crc_memcpy_x86_64.cc",
+        "internal/crc_non_temporal_memcpy.cc",
+    ],
+    hdrs = [
+        "crc32c.h",
+        "internal/crc32c.h",
+        "internal/crc_memcpy.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:public"],
+    deps = [
+        ":cpu_detect",
+        ":crc_internal",
+        ":non_temporal_memcpy",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
+        "//absl/base:endian",
+        "//absl/base:prefetch",
+        "//absl/strings",
+        "//absl/strings:str_format",
+    ],
+)
+
+cc_test(
+    name = "crc32c_test",
+    srcs = ["crc32c_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":crc32c",
+        "//absl/strings",
+        "//absl/strings:str_format",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "non_temporal_arm_intrinsics",
+    hdrs = ["internal/non_temporal_arm_intrinsics.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        "//absl/base:config",
+    ],
+)
+
+cc_library(
+    name = "non_temporal_memcpy",
+    hdrs = ["internal/non_temporal_memcpy.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":non_temporal_arm_intrinsics",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_test(
+    name = "crc_memcpy_test",
+    size = "large",
+    srcs = ["internal/crc_memcpy_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    shard_count = 3,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":crc32c",
+        "//absl/memory",
+        "//absl/random",
+        "//absl/random:distributions",
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "non_temporal_memcpy_test",
+    srcs = ["internal/non_temporal_memcpy_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":non_temporal_memcpy",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "crc_cord_state",
+    srcs = ["internal/crc_cord_state.cc"],
+    hdrs = ["internal/crc_cord_state.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//absl/strings:__pkg__"],
+    deps = [
+        ":crc32c",
+        "//absl/base:config",
+        "//absl/numeric:bits",
+        "//absl/strings",
+    ],
+)
+
+cc_test(
+    name = "crc_cord_state_test",
+    srcs = ["internal/crc_cord_state_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":crc32c",
+        ":crc_cord_state",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "crc32c_benchmark",
+    testonly = 1,
+    srcs = ["crc32c_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "benchmark",
+    ],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":crc32c",
+        "//absl/memory",
+        "//absl/strings",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
diff --git a/abseil-cpp/absl/crc/CMakeLists.txt b/abseil-cpp/absl/crc/CMakeLists.txt
new file mode 100644
index 0000000..2124716
--- /dev/null
+++ b/abseil-cpp/absl/crc/CMakeLists.txt
@@ -0,0 +1,178 @@
+# Copyright 2022 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    crc_cpu_detect
+  HDRS
+    "internal/cpu_detect.h"
+  SRCS
+    "internal/cpu_detect.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::config
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    crc_internal
+  HDRS
+    "internal/crc.h"
+    "internal/crc32_x86_arm_combined_simd.h"
+  SRCS
+    "internal/crc.cc"
+    "internal/crc_internal.h"
+    "internal/crc_x86_arm_combined.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::crc_cpu_detect
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::endian
+    absl::prefetch
+    absl::raw_logging_internal
+    absl::memory
+    absl::bits
+)
+
+absl_cc_library(
+  NAME
+    crc32c
+  HDRS
+    "crc32c.h"
+    "internal/crc32c.h"
+    "internal/crc_memcpy.h"
+  SRCS
+    "crc32c.cc"
+    "internal/crc32c_inline.h"
+    "internal/crc_memcpy_fallback.cc"
+    "internal/crc_memcpy_x86_64.cc"
+    "internal/crc_non_temporal_memcpy.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::crc_cpu_detect
+    absl::crc_internal
+    absl::non_temporal_memcpy
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::endian
+    absl::prefetch
+    absl::str_format
+    absl::strings
+)
+
+absl_cc_test(
+  NAME
+    crc32c_test
+  SRCS
+    "crc32c_test.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::crc32c
+    absl::strings
+    absl::str_format
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    non_temporal_arm_intrinsics
+  HDRS
+    "internal/non_temporal_arm_intrinsics.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    non_temporal_memcpy
+  HDRS
+    "internal/non_temporal_memcpy.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::non_temporal_arm_intrinsics
+    absl::config
+    absl::core_headers
+)
+
+absl_cc_test(
+  NAME
+    crc_memcpy_test
+  SRCS
+    "internal/crc_memcpy_test.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::crc32c
+    absl::memory
+    absl::random_random
+    absl::random_distributions
+    absl::strings
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    non_temporal_memcpy_test
+  SRCS
+    "internal/non_temporal_memcpy_test.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::non_temporal_memcpy
+    GTest::gtest_main
+)
+
+absl_cc_library(
+  NAME
+    crc_cord_state
+  HDRS
+    "internal/crc_cord_state.h"
+  SRCS
+    "internal/crc_cord_state.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::crc32c
+    absl::config
+    absl::strings
+)
+
+absl_cc_test(
+  NAME
+    crc_cord_state_test
+  SRCS
+    "internal/crc_cord_state_test.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::crc_cord_state
+    absl::crc32c
+    GTest::gtest_main
+)
diff --git a/abseil-cpp/absl/crc/crc32c.cc b/abseil-cpp/absl/crc/crc32c.cc
new file mode 100644
index 0000000..468c1b3
--- /dev/null
+++ b/abseil-cpp/absl/crc/crc32c.cc
@@ -0,0 +1,99 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/crc32c.h"
+
+#include <cstdint>
+
+#include "absl/crc/internal/crc.h"
+#include "absl/crc/internal/crc32c.h"
+#include "absl/crc/internal/crc_memcpy.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+const crc_internal::CRC* CrcEngine() {
+  static const crc_internal::CRC* engine = crc_internal::CRC::Crc32c();
+  return engine;
+}
+
+constexpr uint32_t kCRC32Xor = 0xffffffffU;
+
+}  // namespace
+
+namespace crc_internal {
+
+crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {
+  uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
+  CrcEngine()->UnextendByZeroes(&crc, length);
+  return static_cast<crc32c_t>(crc ^ kCRC32Xor);
+}
+
+// Called by `absl::ExtendCrc32c()` on strings with size > 64 or when hardware
+// CRC32C support is missing.
+crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc,
+                              absl::string_view buf_to_add) {
+  uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
+  CrcEngine()->Extend(&crc, buf_to_add.data(), buf_to_add.size());
+  return static_cast<crc32c_t>(crc ^ kCRC32Xor);
+}
+
+}  // namespace crc_internal
+
+crc32c_t ComputeCrc32c(absl::string_view buf) {
+  return ExtendCrc32c(crc32c_t{0}, buf);
+}
+
+crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {
+  uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
+  CrcEngine()->ExtendByZeroes(&crc, length);
+  return static_cast<crc32c_t>(crc ^ kCRC32Xor);
+}
+
+crc32c_t ConcatCrc32c(crc32c_t lhs_crc, crc32c_t rhs_crc, size_t rhs_len) {
+  uint32_t result = static_cast<uint32_t>(lhs_crc);
+  CrcEngine()->ExtendByZeroes(&result, rhs_len);
+  return crc32c_t{result ^ static_cast<uint32_t>(rhs_crc)};
+}
+
+crc32c_t RemoveCrc32cPrefix(crc32c_t crc_a, crc32c_t crc_ab, size_t length_b) {
+  return ConcatCrc32c(crc_a, crc_ab, length_b);
+}
+
+crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count,
+                      crc32c_t initial_crc) {
+  return static_cast<crc32c_t>(
+      crc_internal::Crc32CAndCopy(dest, src, count, initial_crc, false));
+}
+
+// Removes a suffix of a given size from a buffer.
+//
+// Given the CRC32C of an existing buffer, `full_string_crc`; the CRC32C of the
+// suffix of that buffer to remove, `suffix_crc`; and that suffix's length,
+// `suffix_len`, returns the CRC32C of the buffer with the suffix removed.
+//
+// This operation has a runtime cost of O(log(`suffix_len`))
+crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc,
+                            size_t suffix_len) {
+  uint32_t result = static_cast<uint32_t>(full_string_crc) ^
+                    static_cast<uint32_t>(suffix_crc);
+  CrcEngine()->UnextendByZeroes(&result, suffix_len);
+  return crc32c_t{result};
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/crc/crc32c.h b/abseil-cpp/absl/crc/crc32c.h
new file mode 100644
index 0000000..362861e
--- /dev/null
+++ b/abseil-cpp/absl/crc/crc32c.h
@@ -0,0 +1,190 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: crc32c.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for computing CRC32C values as checksums
+// for arbitrary sequences of bytes provided as a string buffer.
+//
+// The API includes the basic functions for computing such CRC32C values and
+// some utility functions for performing more efficient mathematical
+// computations using an existing checksum.
+#ifndef ABSL_CRC_CRC32C_H_
+#define ABSL_CRC_CRC32C_H_
+
+#include <cstdint>
+#include <ostream>
+
+#include "absl/crc/internal/crc32c_inline.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+//-----------------------------------------------------------------------------
+// crc32c_t
+//-----------------------------------------------------------------------------
+
+// `crc32c_t` defines a strongly-typed integer for holding a CRC32C value.
+//
+// Some operators are intentionally omitted. Only equality operators are defined
+// so that `crc32c_t` can be directly compared. Methods for putting `crc32c_t`
+// directly into a set are omitted because this is bug-prone due to checksum
+// collisions. Use an explicit conversion to the `uint32_t` space for operations
+// that treat `crc32c_t` as an integer.
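+//
+// A brief usage sketch (illustrative only; the literal is arbitrary):
+//
+//   absl::crc32c_t crc = absl::ComputeCrc32c("Hello, world");
+//   uint32_t raw = static_cast<uint32_t>(crc);  // conversion must be explicit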
+class crc32c_t final {
+ public:
+  crc32c_t() = default;
+  constexpr explicit crc32c_t(uint32_t crc) : crc_(crc) {}
+
+  crc32c_t(const crc32c_t&) = default;
+  crc32c_t& operator=(const crc32c_t&) = default;
+
+  explicit operator uint32_t() const { return crc_; }
+
+  friend bool operator==(crc32c_t lhs, crc32c_t rhs) {
+    return static_cast<uint32_t>(lhs) == static_cast<uint32_t>(rhs);
+  }
+
+  friend bool operator!=(crc32c_t lhs, crc32c_t rhs) { return !(lhs == rhs); }
+
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, crc32c_t crc) {
+    absl::Format(&sink, "%08x", static_cast<uint32_t>(crc));
+  }
+
+ private:
+  uint32_t crc_;
+};
+
+
+namespace crc_internal {
+// Non-inline code path for `absl::ExtendCrc32c()`. Do not call directly.
+// Call `absl::ExtendCrc32c()` (defined below) instead.
+crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc,
+                              absl::string_view buf_to_add);
+}  // namespace crc_internal
+
+// -----------------------------------------------------------------------------
+// CRC32C Computation Functions
+// -----------------------------------------------------------------------------
+
+// ComputeCrc32c()
+//
+// Returns the CRC32C value of the provided string.
+crc32c_t ComputeCrc32c(absl::string_view buf);
+
+// ExtendCrc32c()
+//
+// Computes a CRC32C value from an `initial_crc` CRC32C value including the
+// `buf_to_add` bytes of an additional buffer. Using this function is more
+// efficient than computing a CRC32C value for the combined buffer from
+// scratch.
+//
+// Note: `ExtendCrc32c` with an initial_crc of 0 is equivalent to
+// `ComputeCrc32c`.
+//
+// This operation has a runtime cost of O(`buf_to_add.size()`)
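+//
+// For example (a sketch; `part1` and `part2` stand for arbitrary
+// `absl::string_view` pieces), extending incrementally matches hashing the
+// whole buffer at once:
+//
+//   absl::crc32c_t crc = absl::ComputeCrc32c(part1);
+//   crc = absl::ExtendCrc32c(crc, part2);
+//   // crc now equals the CRC32C of part1 followed by part2.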
+inline crc32c_t ExtendCrc32c(crc32c_t initial_crc,
+                             absl::string_view buf_to_add) {
+  // Approximately 75% of calls have size <= 64.
+  if (buf_to_add.size() <= 64) {
+    uint32_t crc = static_cast<uint32_t>(initial_crc);
+    if (crc_internal::ExtendCrc32cInline(&crc, buf_to_add.data(),
+                                         buf_to_add.size())) {
+      return crc32c_t{crc};
+    }
+  }
+  return crc_internal::ExtendCrc32cInternal(initial_crc, buf_to_add);
+}
+
+// ExtendCrc32cByZeroes()
+//
+// Computes a CRC32C value for a buffer with an `initial_crc` CRC32C value,
+// where `length` bytes with a value of 0 are appended to the buffer. Using this
+// function is more efficient than computing a CRC32C value for the combined
+// buffer from scratch.
+//
+// This operation has a runtime cost of O(log(`length`))
+crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length);
+
+// MemcpyCrc32c()
+//
+// Copies `src` to `dest` using `memcpy()` semantics, returning the CRC32C
+// value of the copied buffer.
+//
+// Using `MemcpyCrc32c()` is potentially faster than performing the `memcpy()`
+// and `ComputeCrc32c()` operations separately.
+crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count,
+                      crc32c_t initial_crc = crc32c_t{0});
+
+// -----------------------------------------------------------------------------
+// CRC32C Arithmetic Functions
+// -----------------------------------------------------------------------------
+
+// The following functions perform arithmetic on CRC32C values, which are
+// generally more efficient than recalculating any given result's CRC32C value.
+
+// ConcatCrc32c()
+//
+// Calculates the CRC32C value of two buffers with known CRC32C values
+// concatenated together.
+//
+// Given a buffer with CRC32C value `crc1` and a buffer with
+// CRC32C value `crc2` and length, `crc2_length`, returns the CRC32C value of
+// the concatenation of these two buffers.
+//
+// This operation has a runtime cost of O(log(`crc2_length`)).
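+//
+// Sketch (assuming `a` and `b` are arbitrary std::string values):
+//
+//   absl::crc32c_t crc_ab = absl::ConcatCrc32c(
+//       absl::ComputeCrc32c(a), absl::ComputeCrc32c(b), b.size());
+//   // crc_ab equals the CRC32C of a followed by b.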
+crc32c_t ConcatCrc32c(crc32c_t crc1, crc32c_t crc2, size_t crc2_length);
+
+// RemoveCrc32cPrefix()
+//
+// Calculates the CRC32C value of an existing buffer with a series of bytes
+// (the prefix) removed from the beginning of that buffer.
+//
+// Given the CRC32C value of an existing buffer, `full_string_crc`; the CRC32C
+// value of a prefix of that buffer, `prefix_crc`; and the length of the buffer
+// with the prefix removed, `remaining_string_length`, returns the CRC32C
+// value of the buffer with the prefix removed.
+//
+// This operation has a runtime cost of O(log(`remaining_string_length`)).
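+//
+// Sketch (assuming `s` is an arbitrary std::string and 0 < n < s.size()):
+//
+//   absl::string_view sv(s);
+//   absl::crc32c_t rest_crc = absl::RemoveCrc32cPrefix(
+//       absl::ComputeCrc32c(sv.substr(0, n)), absl::ComputeCrc32c(sv),
+//       s.size() - n);
+//   // rest_crc equals absl::ComputeCrc32c(sv.substr(n)).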
+crc32c_t RemoveCrc32cPrefix(crc32c_t prefix_crc, crc32c_t full_string_crc,
+                            size_t remaining_string_length);
+
+// RemoveCrc32cSuffix()
+//
+// Calculates the CRC32C value of an existing buffer with a series of bytes
+// (the suffix) removed from the end of that buffer.
+//
+// Given a CRC32C value of an existing buffer `full_string_crc`, the CRC32C
+// value of the suffix to remove `suffix_crc`, and the length of that suffix
+// `suffix_len`, returns the CRC32C value of the buffer with suffix removed.
+//
+// This operation has a runtime cost of O(log(`suffix_len`))
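+//
+// Sketch (mirroring the prefix example; `s` an arbitrary std::string and
+// 0 < k < s.size()):
+//
+//   absl::string_view sv(s);
+//   absl::crc32c_t head_crc = absl::RemoveCrc32cSuffix(
+//       absl::ComputeCrc32c(sv), absl::ComputeCrc32c(sv.substr(s.size() - k)),
+//       k);
+//   // head_crc equals absl::ComputeCrc32c(sv.substr(0, s.size() - k)).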
+crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc,
+                            size_t suffix_len);
+
+// operator<<
+//
+// Streams the CRC32C value `crc` to the stream `os`.
+inline std::ostream& operator<<(std::ostream& os, crc32c_t crc) {
+  return os << absl::StreamFormat("%08x", static_cast<uint32_t>(crc));
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_CRC32C_H_
diff --git a/abseil-cpp/absl/crc/crc32c_benchmark.cc b/abseil-cpp/absl/crc/crc32c_benchmark.cc
new file mode 100644
index 0000000..3b46ef3
--- /dev/null
+++ b/abseil-cpp/absl/crc/crc32c_benchmark.cc
@@ -0,0 +1,183 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+#include "absl/crc/crc32c.h"
+#include "absl/crc/internal/crc32c.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/string_view.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+std::string TestString(size_t len) {
+  std::string result;
+  result.reserve(len);
+  for (size_t i = 0; i < len; ++i) {
+    result.push_back(static_cast<char>(i % 256));
+  }
+  return result;
+}
+
+void BM_Calculate(benchmark::State& state) {
+  int len = state.range(0);
+  std::string data = TestString(len);
+  for (auto s : state) {
+    benchmark::DoNotOptimize(data);
+    absl::crc32c_t crc = absl::ComputeCrc32c(data);
+    benchmark::DoNotOptimize(crc);
+  }
+}
+BENCHMARK(BM_Calculate)->Arg(0)->Arg(1)->Arg(100)->Arg(10000)->Arg(500000);
+
+void BM_Extend(benchmark::State& state) {
+  int len = state.range(0);
+  std::string extension = TestString(len);
+  absl::crc32c_t base = absl::crc32c_t{0xC99465AA};  // CRC32C of "Hello World"
+  for (auto s : state) {
+    benchmark::DoNotOptimize(base);
+    benchmark::DoNotOptimize(extension);
+    absl::crc32c_t crc = absl::ExtendCrc32c(base, extension);
+    benchmark::DoNotOptimize(crc);
+  }
+}
+BENCHMARK(BM_Extend)->Arg(0)->Arg(1)->Arg(100)->Arg(10000)->Arg(500000)->Arg(
+    100 * 1000 * 1000);
+
+// Make working set >> CPU cache size to benchmark prefetches better
+void BM_ExtendCacheMiss(benchmark::State& state) {
+  int len = state.range(0);
+  constexpr int total = 300 * 1000 * 1000;
+  std::string extension = TestString(total);
+  absl::crc32c_t base = absl::crc32c_t{0xC99465AA};  // CRC32C of "Hello World"
+  for (auto s : state) {
+    for (int i = 0; i < total; i += len * 2) {
+      benchmark::DoNotOptimize(base);
+      benchmark::DoNotOptimize(extension);
+      absl::crc32c_t crc =
+          absl::ExtendCrc32c(base, absl::string_view(&extension[i], len));
+      benchmark::DoNotOptimize(crc);
+    }
+  }
+  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * total / 2);
+}
+BENCHMARK(BM_ExtendCacheMiss)->Arg(10)->Arg(100)->Arg(1000)->Arg(100000);
+
+void BM_ExtendByZeroes(benchmark::State& state) {
+  absl::crc32c_t base = absl::crc32c_t{0xC99465AA};  // CRC32C of "Hello World"
+  int num_zeroes = state.range(0);
+  for (auto s : state) {
+    benchmark::DoNotOptimize(base);
+    absl::crc32c_t crc = absl::ExtendCrc32cByZeroes(base, num_zeroes);
+    benchmark::DoNotOptimize(crc);
+  }
+}
+BENCHMARK(BM_ExtendByZeroes)
+    ->RangeMultiplier(10)
+    ->Range(1, 1000000)
+    ->RangeMultiplier(32)
+    ->Range(1, 1 << 20);
+
+void BM_UnextendByZeroes(benchmark::State& state) {
+  absl::crc32c_t base = absl::crc32c_t{0xdeadbeef};
+  int num_zeroes = state.range(0);
+  for (auto s : state) {
+    benchmark::DoNotOptimize(base);
+    absl::crc32c_t crc =
+        absl::crc_internal::UnextendCrc32cByZeroes(base, num_zeroes);
+    benchmark::DoNotOptimize(crc);
+  }
+}
+BENCHMARK(BM_UnextendByZeroes)
+    ->RangeMultiplier(10)
+    ->Range(1, 1000000)
+    ->RangeMultiplier(32)
+    ->Range(1, 1 << 20);
+
+void BM_Concat(benchmark::State& state) {
+  int string_b_len = state.range(0);
+  std::string string_b = TestString(string_b_len);
+
+  // CRC32C of "Hello World"
+  absl::crc32c_t crc_a = absl::crc32c_t{0xC99465AA};
+  absl::crc32c_t crc_b = absl::ComputeCrc32c(string_b);
+
+  for (auto s : state) {
+    benchmark::DoNotOptimize(crc_a);
+    benchmark::DoNotOptimize(crc_b);
+    benchmark::DoNotOptimize(string_b_len);
+    absl::crc32c_t crc_ab = absl::ConcatCrc32c(crc_a, crc_b, string_b_len);
+    benchmark::DoNotOptimize(crc_ab);
+  }
+}
+BENCHMARK(BM_Concat)
+    ->RangeMultiplier(10)
+    ->Range(1, 1000000)
+    ->RangeMultiplier(32)
+    ->Range(1, 1 << 20);
+
+void BM_Memcpy(benchmark::State& state) {
+  int string_len = state.range(0);
+
+  std::string source = TestString(string_len);
+  auto dest = absl::make_unique<char[]>(string_len);
+
+  for (auto s : state) {
+    benchmark::DoNotOptimize(source);
+    absl::crc32c_t crc =
+        absl::MemcpyCrc32c(dest.get(), source.data(), source.size());
+    benchmark::DoNotOptimize(crc);
+    benchmark::DoNotOptimize(dest);
+    benchmark::DoNotOptimize(dest.get());
+    benchmark::DoNotOptimize(dest[0]);
+  }
+
+  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
+                          state.range(0));
+}
+BENCHMARK(BM_Memcpy)->Arg(0)->Arg(1)->Arg(100)->Arg(10000)->Arg(500000);
+
+void BM_RemoveSuffix(benchmark::State& state) {
+  int full_string_len = state.range(0);
+  int suffix_len = state.range(1);
+
+  std::string full_string = TestString(full_string_len);
+  std::string suffix = full_string.substr(
+    full_string_len - suffix_len, full_string_len);
+
+  absl::crc32c_t full_string_crc = absl::ComputeCrc32c(full_string);
+  absl::crc32c_t suffix_crc = absl::ComputeCrc32c(suffix);
+
+  for (auto s : state) {
+    benchmark::DoNotOptimize(full_string_crc);
+    benchmark::DoNotOptimize(suffix_crc);
+    benchmark::DoNotOptimize(suffix_len);
+    absl::crc32c_t crc = absl::RemoveCrc32cSuffix(full_string_crc, suffix_crc,
+      suffix_len);
+    benchmark::DoNotOptimize(crc);
+  }
+}
+BENCHMARK(BM_RemoveSuffix)
+    ->ArgPair(1, 1)
+    ->ArgPair(100, 10)
+    ->ArgPair(100, 100)
+    ->ArgPair(10000, 1)
+    ->ArgPair(10000, 100)
+    ->ArgPair(10000, 10000)
+    ->ArgPair(500000, 1)
+    ->ArgPair(500000, 100)
+    ->ArgPair(500000, 10000)
+    ->ArgPair(500000, 500000);
+}  // namespace
diff --git a/abseil-cpp/absl/crc/crc32c_test.cc b/abseil-cpp/absl/crc/crc32c_test.cc
new file mode 100644
index 0000000..df0afb3
--- /dev/null
+++ b/abseil-cpp/absl/crc/crc32c_test.cc
@@ -0,0 +1,227 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/crc32c.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <sstream>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/crc/internal/crc32c.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+
+namespace {
+
+TEST(CRC32C, RFC3720) {
+  // Test the results of the vectors from
+  // https://www.rfc-editor.org/rfc/rfc3720#appendix-B.4
+  char data[32];
+
+  // 32 bytes of zeroes.
+  memset(data, 0, sizeof(data));
+  EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
+            absl::crc32c_t{0x8a9136aa});
+
+  // 32 bytes of ones.
+  memset(data, 0xff, sizeof(data));
+  EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
+            absl::crc32c_t{0x62a8ab43});
+
+  // 32 incrementing bytes.
+  for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(i);
+  EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
+            absl::crc32c_t{0x46dd794e});
+
+  // 32 decrementing bytes.
+  for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(31 - i);
+  EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
+            absl::crc32c_t{0x113fdb5c});
+
+  // An iSCSI - SCSI Read (10) Command PDU.
+  constexpr uint8_t cmd[48] = {
+      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  };
+  EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(
+                reinterpret_cast<const char*>(cmd), sizeof(cmd))),
+            absl::crc32c_t{0xd9963a56});
+}
+
+std::string TestString(size_t len) {
+  std::string result;
+  result.reserve(len);
+  for (size_t i = 0; i < len; ++i) {
+    result.push_back(static_cast<char>(i % 256));
+  }
+  return result;
+}
+
+TEST(CRC32C, Compute) {
+  EXPECT_EQ(absl::ComputeCrc32c(""), absl::crc32c_t{0});
+  EXPECT_EQ(absl::ComputeCrc32c("hello world"), absl::crc32c_t{0xc99465aa});
+}
+
+TEST(CRC32C, Extend) {
+  uint32_t base = 0xC99465AA;  // CRC32C of "Hello World"
+  std::string extension = "Extension String";
+
+  EXPECT_EQ(
+      absl::ExtendCrc32c(absl::crc32c_t{base}, extension),
+      absl::crc32c_t{0xD2F65090});  // CRC32C of "Hello WorldExtension String"
+}
+
+TEST(CRC32C, ExtendByZeroes) {
+  std::string base = "hello world";
+  absl::crc32c_t base_crc = absl::crc32c_t{0xc99465aa};
+
+  constexpr size_t kExtendByValues[] = {100, 10000, 100000};
+  for (const size_t extend_by : kExtendByValues) {
+    SCOPED_TRACE(extend_by);
+    absl::crc32c_t crc2 = absl::ExtendCrc32cByZeroes(base_crc, extend_by);
+    EXPECT_EQ(crc2, absl::ComputeCrc32c(base + std::string(extend_by, '\0')));
+  }
+}
+
+TEST(CRC32C, UnextendByZeroes) {
+  constexpr size_t kExtendByValues[] = {2, 200, 20000, 200000, 20000000};
+  constexpr size_t kUnextendByValues[] = {0, 100, 10000, 100000, 10000000};
+
+  for (auto seed_crc : {absl::crc32c_t{0}, absl::crc32c_t{0xc99465aa}}) {
+    SCOPED_TRACE(seed_crc);
+    for (const size_t size_1 : kExtendByValues) {
+      for (const size_t size_2 : kUnextendByValues) {
+        size_t extend_size = std::max(size_1, size_2);
+        size_t unextend_size = std::min(size_1, size_2);
+        SCOPED_TRACE(extend_size);
+        SCOPED_TRACE(unextend_size);
+
+        // Extending by A zeroes and unextending by B < A zeroes should be
+        // identical to extending by A-B zeroes.
+        absl::crc32c_t crc1 = seed_crc;
+        crc1 = absl::ExtendCrc32cByZeroes(crc1, extend_size);
+        crc1 = absl::crc_internal::UnextendCrc32cByZeroes(crc1, unextend_size);
+
+        absl::crc32c_t crc2 = seed_crc;
+        crc2 = absl::ExtendCrc32cByZeroes(crc2, extend_size - unextend_size);
+
+        EXPECT_EQ(crc1, crc2);
+      }
+    }
+  }
+
+  constexpr size_t kSizes[] = {0, 1, 100, 10000};
+  for (const size_t size : kSizes) {
+    SCOPED_TRACE(size);
+    std::string string_before = TestString(size);
+    std::string string_after = string_before + std::string(size, '\0');
+
+    absl::crc32c_t crc_before = absl::ComputeCrc32c(string_before);
+    absl::crc32c_t crc_after = absl::ComputeCrc32c(string_after);
+
+    EXPECT_EQ(crc_before,
+              absl::crc_internal::UnextendCrc32cByZeroes(crc_after, size));
+  }
+}
+
+TEST(CRC32C, Concat) {
+  std::string hello = "Hello, ";
+  std::string world = "world!";
+  std::string hello_world = absl::StrCat(hello, world);
+
+  absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);
+  absl::crc32c_t crc_b = absl::ComputeCrc32c(world);
+  absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);
+
+  EXPECT_EQ(absl::ConcatCrc32c(crc_a, crc_b, world.size()), crc_ab);
+}
+
+TEST(CRC32C, Memcpy) {
+  constexpr size_t kBytesSize[] = {0, 1, 20, 500, 100000};
+  for (size_t bytes : kBytesSize) {
+    SCOPED_TRACE(bytes);
+    std::string sample_string = TestString(bytes);
+    std::string target_buffer = std::string(bytes, '\0');
+
+    absl::crc32c_t memcpy_crc =
+        absl::MemcpyCrc32c(&(target_buffer[0]), sample_string.data(), bytes);
+    absl::crc32c_t compute_crc = absl::ComputeCrc32c(sample_string);
+
+    EXPECT_EQ(memcpy_crc, compute_crc);
+    EXPECT_EQ(sample_string, target_buffer);
+  }
+}
+
+TEST(CRC32C, RemovePrefix) {
+  std::string hello = "Hello, ";
+  std::string world = "world!";
+  std::string hello_world = absl::StrCat(hello, world);
+
+  absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);
+  absl::crc32c_t crc_b = absl::ComputeCrc32c(world);
+  absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);
+
+  EXPECT_EQ(absl::RemoveCrc32cPrefix(crc_a, crc_ab, world.size()), crc_b);
+}
+
+TEST(CRC32C, RemoveSuffix) {
+  std::string hello = "Hello, ";
+  std::string world = "world!";
+  std::string hello_world = absl::StrCat(hello, world);
+
+  absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);
+  absl::crc32c_t crc_b = absl::ComputeCrc32c(world);
+  absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);
+
+  EXPECT_EQ(absl::RemoveCrc32cSuffix(crc_ab, crc_b, world.size()), crc_a);
+}
+
+TEST(CRC32C, InsertionOperator) {
+  {
+    std::ostringstream buf;
+    buf << absl::crc32c_t{0xc99465aa};
+    EXPECT_EQ(buf.str(), "c99465aa");
+  }
+  {
+    std::ostringstream buf;
+    buf << absl::crc32c_t{0};
+    EXPECT_EQ(buf.str(), "00000000");
+  }
+  {
+    std::ostringstream buf;
+    buf << absl::crc32c_t{17};
+    EXPECT_EQ(buf.str(), "00000011");
+  }
+}
+
+TEST(CRC32C, AbslStringify) {
+  // StrFormat
+  EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0xc99465aa}), "c99465aa");
+  EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0}), "00000000");
+  EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{17}), "00000011");
+
+  // StrCat
+  EXPECT_EQ(absl::StrCat(absl::crc32c_t{0xc99465aa}), "c99465aa");
+  EXPECT_EQ(absl::StrCat(absl::crc32c_t{0}), "00000000");
+  EXPECT_EQ(absl::StrCat(absl::crc32c_t{17}), "00000011");
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/crc/internal/cpu_detect.cc b/abseil-cpp/absl/crc/internal/cpu_detect.cc
new file mode 100644
index 0000000..8383808
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/cpu_detect.cc
@@ -0,0 +1,262 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/internal/cpu_detect.h"
+
+#include <cstdint>
+#include <string>
+
+#include "absl/base/config.h"
+
+#if defined(__aarch64__) && defined(__linux__)
+#include <asm/hwcap.h>
+#include <sys/auxv.h>
+#endif
+
+#if defined(_WIN32) || defined(_WIN64)
+#include <intrin.h>
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+#if ABSL_HAVE_BUILTIN(__cpuid)
+// MSVC-equivalent __cpuid intrinsic declaration for clang-like compilers
+// for non-Windows build environments.
+extern void __cpuid(int[4], int);
+#elif !defined(_WIN32) && !defined(_WIN64)
+// MSVC defines this function for us.
+// https://learn.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex
+static void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile("cpuid \n\t"
+                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
+                     "=d"(cpu_info[3])
+                   : "a"(info_type), "c"(0));
+}
+#endif  // !defined(_WIN32) && !defined(_WIN64)
+#endif  // defined(__x86_64__) || defined(_M_X64)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+#if defined(__x86_64__) || defined(_M_X64)
+
+namespace {
+
+enum class Vendor {
+  kUnknown,
+  kIntel,
+  kAmd,
+};
+
+Vendor GetVendor() {
+  // Get the vendor string (issue CPUID with eax = 0).
+  int cpu_info[4];
+  __cpuid(cpu_info, 0);
+
+  std::string vendor;
+  vendor.append(reinterpret_cast<char*>(&cpu_info[1]), 4);
+  vendor.append(reinterpret_cast<char*>(&cpu_info[3]), 4);
+  vendor.append(reinterpret_cast<char*>(&cpu_info[2]), 4);
+  if (vendor == "GenuineIntel") {
+    return Vendor::kIntel;
+  } else if (vendor == "AuthenticAMD") {
+    return Vendor::kAmd;
+  } else {
+    return Vendor::kUnknown;
+  }
+}
+
+CpuType GetIntelCpuType() {
+  // To get general information and extended features we send eax = 1 and
+  // ecx = 0 to cpuid.  The response is returned in eax, ebx, ecx and edx.
+  // (See Intel 64 and IA-32 Architectures Software Developer's Manual
+  // Volume 2A: Instruction Set Reference, A-M CPUID).
+  // https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2a-manual.html
+  // https://learn.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex
+  int cpu_info[4];
+  __cpuid(cpu_info, 1);
+
+  // Response in eax bits as follows:
+  // 0-3 (stepping id)
+  // 4-7 (model number),
+  // 8-11 (family code),
+  // 12-13 (processor type),
+  // 16-19 (extended model)
+  // 20-27 (extended family)
+
+  int family = (cpu_info[0] >> 8) & 0x0f;
+  int model_num = (cpu_info[0] >> 4) & 0x0f;
+  int ext_family = (cpu_info[0] >> 20) & 0xff;
+  int ext_model_num = (cpu_info[0] >> 16) & 0x0f;
+
+  int brand_id = cpu_info[1] & 0xff;
+
+  // Process the extended family and model info if necessary
+  if (family == 0x0f) {
+    family += ext_family;
+  }
+
+  if (family == 0x0f || family == 0x6) {
+    model_num += (ext_model_num << 4);
+  }
+
+  switch (brand_id) {
+    case 0:  // no brand ID, so parse CPU family/model
+      switch (family) {
+        case 6:  // Most PentiumIII processors are in this category
+          switch (model_num) {
+            case 0x2c:  // Westmere: Gulftown
+              return CpuType::kIntelWestmere;
+            case 0x2d:  // Sandybridge
+              return CpuType::kIntelSandybridge;
+            case 0x3e:  // Ivybridge
+              return CpuType::kIntelIvybridge;
+            case 0x3c:  // Haswell (client)
+            case 0x3f:  // Haswell
+              return CpuType::kIntelHaswell;
+            case 0x4f:  // Broadwell
+            case 0x56:  // BroadwellDE
+              return CpuType::kIntelBroadwell;
+            case 0x55:                 // Skylake Xeon
+              if ((cpu_info[0] & 0x0f) < 5) {  // stepping < 5 is skylake
+                return CpuType::kIntelSkylakeXeon;
+              } else {  // stepping >= 5 is cascadelake
+                return CpuType::kIntelCascadelakeXeon;
+              }
+            case 0x5e:  // Skylake (client)
+              return CpuType::kIntelSkylake;
+            default:
+              return CpuType::kUnknown;
+          }
+        default:
+          return CpuType::kUnknown;
+      }
+    default:
+      return CpuType::kUnknown;
+  }
+}
+
+CpuType GetAmdCpuType() {
+  // To get general information and extended features we send eax = 1 and
+  // ecx = 0 to cpuid.  The response is returned in eax, ebx, ecx and edx.
+  // (See Intel 64 and IA-32 Architectures Software Developer's Manual
+  // Volume 2A: Instruction Set Reference, A-M CPUID).
+  // https://learn.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex
+  int cpu_info[4];
+  __cpuid(cpu_info, 1);
+
+  // Response in eax bits as follows:
+  // 0-3 (stepping id)
+  // 4-7 (model number),
+  // 8-11 (family code),
+  // 12-13 (processor type),
+  // 16-19 (extended model)
+  // 20-27 (extended family)
+
+  int family = (cpu_info[0] >> 8) & 0x0f;
+  int model_num = (cpu_info[0] >> 4) & 0x0f;
+  int ext_family = (cpu_info[0] >> 20) & 0xff;
+  int ext_model_num = (cpu_info[0] >> 16) & 0x0f;
+
+  if (family == 0x0f) {
+    family += ext_family;
+    model_num += (ext_model_num << 4);
+  }
+
+  switch (family) {
+    case 0x17:
+      switch (model_num) {
+        case 0x0:  // Stepping Ax
+        case 0x1:  // Stepping Bx
+          return CpuType::kAmdNaples;
+        case 0x30:  // Stepping Ax
+        case 0x31:  // Stepping Bx
+          return CpuType::kAmdRome;
+        default:
+          return CpuType::kUnknown;
+      }
+      break;
+    case 0x19:
+      switch (model_num) {
+        case 0x1:  // Stepping B0
+          return CpuType::kAmdMilan;
+        default:
+          return CpuType::kUnknown;
+      }
+      break;
+    default:
+      return CpuType::kUnknown;
+  }
+}
+
+}  // namespace
+
+CpuType GetCpuType() {
+  switch (GetVendor()) {
+    case Vendor::kIntel:
+      return GetIntelCpuType();
+    case Vendor::kAmd:
+      return GetAmdCpuType();
+    default:
+      return CpuType::kUnknown;
+  }
+}
+
+bool SupportsArmCRC32PMULL() { return false; }
+
+#elif defined(__aarch64__) && defined(__linux__)
+
+#ifndef HWCAP_CPUID
+#define HWCAP_CPUID (1 << 11)
+#endif
+
+#define ABSL_INTERNAL_AARCH64_ID_REG_READ(id, val) \
+  asm("mrs %0, " #id : "=r"(val))
+
+CpuType GetCpuType() {
+  // MIDR_EL1 is not visible to EL0, however the access will be emulated by
+  // linux if AT_HWCAP has HWCAP_CPUID set.
+  //
+  // This method will be unreliable on heterogeneous computing systems (ex:
+  // big.LITTLE) since the value of MIDR_EL1 will change based on the calling
+  // thread.
+  uint64_t hwcaps = getauxval(AT_HWCAP);
+  if (hwcaps & HWCAP_CPUID) {
+    uint64_t midr = 0;
+    ABSL_INTERNAL_AARCH64_ID_REG_READ(MIDR_EL1, midr);
+    uint32_t implementer = (midr >> 24) & 0xff;
+    uint32_t part_number = (midr >> 4) & 0xfff;
+    if (implementer == 0x41 && part_number == 0xd0c) {
+      return CpuType::kArmNeoverseN1;
+    }
+  }
+  return CpuType::kUnknown;
+}
+
+bool SupportsArmCRC32PMULL() {
+  uint64_t hwcaps = getauxval(AT_HWCAP);
+  return (hwcaps & HWCAP_CRC32) && (hwcaps & HWCAP_PMULL);
+}
+
+#else
+
+CpuType GetCpuType() { return CpuType::kUnknown; }
+
+bool SupportsArmCRC32PMULL() { return false; }
+
+#endif
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
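The family/model decoding in GetIntelCpuType() and GetAmdCpuType() above follows the CPUID leaf-1 EAX layout spelled out in their comments. A minimal standalone sketch of that decode, using a representative Skylake-client leaf-1 value purely for illustration (the value is an assumption, not taken from this change):

#include <cstdint>
#include <cstdio>

// Mirrors the family/model arithmetic used in GetIntelCpuType()/GetAmdCpuType().
void DecodeCpuidEax(uint32_t eax) {
  uint32_t stepping = eax & 0x0f;
  uint32_t family = (eax >> 8) & 0x0f;
  uint32_t model = (eax >> 4) & 0x0f;
  uint32_t ext_family = (eax >> 20) & 0xff;
  uint32_t ext_model = (eax >> 16) & 0x0f;

  // The extended fields only apply to family 0x0f (and, for the model, 0x6).
  if (family == 0x0f) family += ext_family;
  if (family == 0x0f || family == 0x6) model += (ext_model << 4);

  std::printf("family=0x%x model=0x%x stepping=0x%x\n",
              static_cast<unsigned>(family), static_cast<unsigned>(model),
              static_cast<unsigned>(stepping));
}

int main() {
  DecodeCpuidEax(0x000506e3);  // prints family=0x6 model=0x5e stepping=0x3
  return 0;
}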
diff --git a/abseil-cpp/absl/crc/internal/cpu_detect.h b/abseil-cpp/absl/crc/internal/cpu_detect.h
new file mode 100644
index 0000000..6054f69
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/cpu_detect.h
@@ -0,0 +1,57 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CPU_DETECT_H_
+#define ABSL_CRC_INTERNAL_CPU_DETECT_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+// Enumeration of architectures that we have special-case tuning parameters for.
+// This set may change over time.
+enum class CpuType {
+  kUnknown,
+  kIntelHaswell,
+  kAmdRome,
+  kAmdNaples,
+  kAmdMilan,
+  kIntelCascadelakeXeon,
+  kIntelSkylakeXeon,
+  kIntelBroadwell,
+  kIntelSkylake,
+  kIntelIvybridge,
+  kIntelSandybridge,
+  kIntelWestmere,
+  kArmNeoverseN1,
+};
+
+// Returns the type of host CPU this code is running on.  Returns kUnknown if
+// the host CPU is of unknown type, or if detection otherwise fails.
+CpuType GetCpuType();
+
+// Returns whether the host CPU supports the CPU features needed for our
+// accelerated implementations. The CpuTypes enumerated above apart from
+// kUnknown support the required features. On unknown CPUs, we can use
+// this to see if it's safe to use hardware acceleration, though without any
+// tuning.
+bool SupportsArmCRC32PMULL();
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CPU_DETECT_H_
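A sketch of how a caller might use this header to decide between an accelerated and a portable code path, based only on the contract documented above. UseAcceleratedCrc() is a hypothetical helper; the real dispatch in this library lives in the CRC implementation files, not here.

#include "absl/crc/internal/cpu_detect.h"

bool UseAcceleratedCrc() {
  using absl::crc_internal::CpuType;
  using absl::crc_internal::GetCpuType;
  using absl::crc_internal::SupportsArmCRC32PMULL;

  // Known CPU types all support the required features; on unknown CPUs the
  // ARM feature probe tells us whether acceleration is still safe, just
  // without any per-CPU tuning.
  return GetCpuType() != CpuType::kUnknown || SupportsArmCRC32PMULL();
}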
diff --git a/abseil-cpp/absl/crc/internal/crc.cc b/abseil-cpp/absl/crc/internal/crc.cc
new file mode 100644
index 0000000..22e91c5
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc.cc
@@ -0,0 +1,437 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implementation of CRCs (aka Rabin Fingerprints).
+// Treats the input as a polynomial with coefficients in Z(2),
+// and finds the remainder when divided by an irreducible polynomial
+// of the appropriate length.
+// It handles all CRC sizes from 8 to 128 bits.
+// It's somewhat complicated by having separate implementations optimized for
+// CRCs <= 32 bits, <= 64 bits, and <= 128 bits.
+// The input string is prefixed with a "1" bit, and has "degree" "0" bits
+// appended to it before the remainder is found.   This ensures that
+// short strings are scrambled somewhat and that strings consisting
+// of all nulls have a non-zero CRC.
+//
+// Uses the "interleaved word-by-word" method from
+// "Everything we know about CRC but afraid to forget" by Andrew Kadatch
+// and Bob Jenkins,
+// http://crcutil.googlecode.com/files/crc-doc.1.0.pdf
+//
+// The idea is to compute kStride CRCs simultaneously, allowing the
+// processor to more effectively use multiple execution units. Each of
+// the CRCs is calculated on one word of data followed by kStride - 1
+// words of zeroes; the CRC starting points are staggered by one word.
+// Assuming a stride of 4 with data words "ABCDABCDABCD", the first
+// CRC is over A000A000A, the second over 0B000B000B, and so on.
+// The CRC of the whole data is then calculated by properly aligning the
+// CRCs by appending zeroes until the data lengths agree then XORing
+// the CRCs.
+
+#include "absl/crc/internal/crc.h"
+
+#include <cstdint>
+
+#include "absl/base/internal/endian.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/prefetch.h"
+#include "absl/crc/internal/crc_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+namespace {
+
+// Constants
+#if defined(__i386__) || defined(__x86_64__)
+constexpr bool kNeedAlignedLoads = false;
+#else
+constexpr bool kNeedAlignedLoads = true;
+#endif
+
+// We express the number of zeroes as a number in base ZEROES_BASE. By
+// pre-computing the zero extensions for all possible components of such an
+// expression (numbers in a form a*ZEROES_BASE**b), we can calculate the
+// resulting extension by multiplying the extensions for individual components
+// using log_{ZEROES_BASE}(num_zeroes) polynomial multiplications. The tables of
+// zero extensions contain (ZEROES_BASE - 1) * (log_{ZEROES_BASE}(64)) entries.
+constexpr int ZEROES_BASE_LG = 4;                   // log_2(ZEROES_BASE)
+constexpr int ZEROES_BASE = (1 << ZEROES_BASE_LG);  // must be a power of 2
+
+constexpr uint32_t kCrc32cPoly = 0x82f63b78;
+
+uint32_t ReverseBits(uint32_t bits) {
+  bits = (bits & 0xaaaaaaaau) >> 1 | (bits & 0x55555555u) << 1;
+  bits = (bits & 0xccccccccu) >> 2 | (bits & 0x33333333u) << 2;
+  bits = (bits & 0xf0f0f0f0u) >> 4 | (bits & 0x0f0f0f0fu) << 4;
+  return absl::gbswap_32(bits);
+}
+
+// Polynomial long multiplication mod the polynomial of degree 32.
+void PolyMultiply(uint32_t* val, uint32_t m, uint32_t poly) {
+  uint32_t l = *val;
+  uint32_t result = 0;
+  auto onebit = uint32_t{0x80000000u};
+  for (uint32_t one = onebit; one != 0; one >>= 1) {
+    if ((l & one) != 0) {
+      result ^= m;
+    }
+    if (m & 1) {
+      m = (m >> 1) ^ poly;
+    } else {
+      m >>= 1;
+    }
+  }
+  *val = result;
+}
+}  // namespace
+
+void CRCImpl::FillWordTable(uint32_t poly, uint32_t last, int word_size,
+                            Uint32By256* t) {
+  for (int j = 0; j != word_size; j++) {  // for each byte of extension....
+    t[j][0] = 0;                          // a zero has no effect
+    for (int i = 128; i != 0; i >>= 1) {  // fill in entries for powers of 2
+      if (j == 0 && i == 128) {
+        t[j][i] = last;  // top bit in last byte is given
+      } else {
+        // each successive power of two is derived from the previous
+        // one, either in this table, or the last table
+        uint32_t pred;
+        if (i == 128) {
+          pred = t[j - 1][1];
+        } else {
+          pred = t[j][i << 1];
+        }
+        // Advance the CRC by one bit (multiply by X, and take remainder
+        // through one step of polynomial long division)
+        if (pred & 1) {
+          t[j][i] = (pred >> 1) ^ poly;
+        } else {
+          t[j][i] = pred >> 1;
+        }
+      }
+    }
+    // CRCs have the property that CRC(a xor b) == CRC(a) xor CRC(b)
+    // so we can make all the tables for non-powers of two by
+    // xoring previously created entries.
+    for (int i = 2; i != 256; i <<= 1) {
+      for (int k = i + 1; k != (i << 1); k++) {
+        t[j][k] = t[j][i] ^ t[j][k - i];
+      }
+    }
+  }
+}
+
+int CRCImpl::FillZeroesTable(uint32_t poly, Uint32By256* t) {
+  uint32_t inc = 1;
+  inc <<= 31;
+
+  // Extend by one zero bit. We know degree > 1 so (inc & 1) == 0.
+  inc >>= 1;
+
+  // Now extend by 2, 4, and 8 bits, so now `inc` is extended by one zero byte.
+  for (int i = 0; i < 3; ++i) {
+    PolyMultiply(&inc, inc, poly);
+  }
+
+  int j = 0;
+  for (uint64_t inc_len = 1; inc_len != 0; inc_len <<= ZEROES_BASE_LG) {
+    // Every entry in the table adds an additional inc_len zeroes.
+    uint32_t v = inc;
+    for (int a = 1; a != ZEROES_BASE; a++) {
+      t[0][j] = v;
+      PolyMultiply(&v, inc, poly);
+      j++;
+    }
+    inc = v;
+  }
+  ABSL_RAW_CHECK(j <= 256, "");
+  return j;
+}
+
+// Internal version of the "constructor".
+CRCImpl* CRCImpl::NewInternal() {
+  // Find an accelerated implementation first.
+  CRCImpl* result = TryNewCRC32AcceleratedX86ARMCombined();
+
+  // Fall back to a generic implementation if no acceleration is available.
+  if (result == nullptr) {
+    result = new CRC32();
+  }
+
+  result->InitTables();
+
+  return result;
+}
+
+//  The 32-bit implementation
+
+void CRC32::InitTables() {
+  // Compute the table for extending a CRC by one byte.
+  Uint32By256* t = new Uint32By256[4];
+  FillWordTable(kCrc32cPoly, kCrc32cPoly, 1, t);
+  for (int i = 0; i != 256; i++) {
+    this->table0_[i] = t[0][i];
+  }
+
+  // Construct a table for updating the CRC by 4 bytes data followed by
+  // 12 bytes of zeroes.
+  //
+  // Note: the data word size could be larger than the CRC size; it might
+  // be slightly faster to use a 64-bit data word, but doing so doubles the
+  // table size.
+  uint32_t last = kCrc32cPoly;
+  const size_t size = 12;
+  for (size_t i = 0; i < size; ++i) {
+    last = (last >> 8) ^ this->table0_[last & 0xff];
+  }
+  FillWordTable(kCrc32cPoly, last, 4, t);
+  for (size_t b = 0; b < 4; ++b) {
+    for (int i = 0; i < 256; ++i) {
+      this->table_[b][i] = t[b][i];
+    }
+  }
+
+  int j = FillZeroesTable(kCrc32cPoly, t);
+  ABSL_RAW_CHECK(j <= static_cast<int>(ABSL_ARRAYSIZE(this->zeroes_)), "");
+  for (int i = 0; i < j; i++) {
+    this->zeroes_[i] = t[0][i];
+  }
+
+  delete[] t;
+
+  // Build up tables for _reversing_ the operation of doing CRC operations on
+  // zero bytes.
+
+  // In C++, extending `crc` by a single zero bit is done by the following:
+  // (A)  bool low_bit_set = (crc & 1);
+  //      crc >>= 1;
+  //      if (low_bit_set) crc ^= kCrc32cPoly;
+  //
+  // In particular note that the high bit of `crc` after this operation will be
+  // set if and only if the low bit of `crc` was set before it.  This means that
+  // no information is lost, and the operation can be reversed, as follows:
+  // (B)  bool high_bit_set = (crc & 0x80000000u);
+  //      if (high_bit_set) crc ^= kCrc32cPoly;
+  //      crc <<= 1;
+  //      if (high_bit_set) crc ^= 1;
+  //
+  // Or, equivalently:
+  // (C)  bool high_bit_set = (crc & 0x80000000u);
+  //      crc <<= 1;
+  //      if (high_bit_set) crc ^= ((kCrc32cPoly << 1) ^ 1);
+  //
+  // The last observation is, if we store our checksums in variable `rcrc`,
+  // with order of the bits reversed, the inverse operation becomes:
+  // (D)  bool low_bit_set = (rcrc & 1);
+  //      rcrc >>= 1;
+  //      if (low_bit_set) rcrc ^= ReverseBits((kCrc32cPoly << 1) ^ 1)
+  //
+  // This is the same algorithm (A) that we started with, only with a different
+  // polynomial bit pattern.  This means that by building up our tables with
+  // this alternate polynomial, we can apply the CRC algorithms to a
+  // bit-reversed CRC checksum to perform inverse zero-extension.
+
+  const uint32_t kCrc32cUnextendPoly =
+      ReverseBits(static_cast<uint32_t>((kCrc32cPoly << 1) ^ 1));
+  FillWordTable(kCrc32cUnextendPoly, kCrc32cUnextendPoly, 1, &reverse_table0_);
+
+  j = FillZeroesTable(kCrc32cUnextendPoly, &reverse_zeroes_);
+  ABSL_RAW_CHECK(j <= static_cast<int>(ABSL_ARRAYSIZE(this->reverse_zeroes_)),
+                 "");
+}
+
+void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
+  const uint8_t* p = static_cast<const uint8_t*>(bytes);
+  const uint8_t* e = p + length;
+  uint32_t l = *crc;
+
+  auto step_one_byte = [this, &p, &l]() {
+    int c = (l & 0xff) ^ *p++;
+    l = this->table0_[c] ^ (l >> 8);
+  };
+
+  if (kNeedAlignedLoads) {
+    // Point x at the first 4-byte aligned byte in the string; this might be
+    // past the end of the string.
+    const uint8_t* x = RoundUp<4>(p);
+    if (x <= e) {
+      // Process bytes until finished or p is 4-byte aligned
+      while (p != x) {
+        step_one_byte();
+      }
+    }
+  }
+
+  const size_t kSwathSize = 16;
+  if (static_cast<size_t>(e - p) >= kSwathSize) {
+    // Load one swath of data into the operating buffers.
+    uint32_t buf0 = absl::little_endian::Load32(p) ^ l;
+    uint32_t buf1 = absl::little_endian::Load32(p + 4);
+    uint32_t buf2 = absl::little_endian::Load32(p + 8);
+    uint32_t buf3 = absl::little_endian::Load32(p + 12);
+    p += kSwathSize;
+
+    // Increment a CRC value by a "swath"; this combines the four bytes
+    // starting at `ptr` and twelve zero bytes, so that four CRCs can be
+    // built incrementally and combined at the end.
+    const auto step_swath = [this](uint32_t crc_in, const std::uint8_t* ptr) {
+      return absl::little_endian::Load32(ptr) ^
+             this->table_[3][crc_in & 0xff] ^
+             this->table_[2][(crc_in >> 8) & 0xff] ^
+             this->table_[1][(crc_in >> 16) & 0xff] ^
+             this->table_[0][crc_in >> 24];
+    };
+
+    // Run one CRC calculation step over all swaths in one 16-byte stride
+    const auto step_stride = [&]() {
+      buf0 = step_swath(buf0, p);
+      buf1 = step_swath(buf1, p + 4);
+      buf2 = step_swath(buf2, p + 8);
+      buf3 = step_swath(buf3, p + 12);
+      p += 16;
+    };
+
+    // Process kStride interleaved swaths through the data in parallel.
+    while ((e - p) > kPrefetchHorizon) {
+      PrefetchToLocalCacheNta(
+          reinterpret_cast<const void*>(p + kPrefetchHorizon));
+      // Process 64 bytes at a time
+      step_stride();
+      step_stride();
+      step_stride();
+      step_stride();
+    }
+    while (static_cast<size_t>(e - p) >= kSwathSize) {
+      step_stride();
+    }
+
+    // Now advance one word at a time as far as possible. This isn't worth
+    // doing if we have word-advance tables.
+    while (static_cast<size_t>(e - p) >= 4) {
+      buf0 = step_swath(buf0, p);
+      uint32_t tmp = buf0;
+      buf0 = buf1;
+      buf1 = buf2;
+      buf2 = buf3;
+      buf3 = tmp;
+      p += 4;
+    }
+
+    // Combine the results from the different swaths. This is just a CRC
+    // on the data values in the bufX words.
+    auto combine_one_word = [this](uint32_t crc_in, uint32_t w) {
+      w ^= crc_in;
+      for (size_t i = 0; i < 4; ++i) {
+        w = (w >> 8) ^ this->table0_[w & 0xff];
+      }
+      return w;
+    };
+
+    l = combine_one_word(0, buf0);
+    l = combine_one_word(l, buf1);
+    l = combine_one_word(l, buf2);
+    l = combine_one_word(l, buf3);
+  }
+
+  // Process the last few bytes
+  while (p != e) {
+    step_one_byte();
+  }
+
+  *crc = l;
+}
+
+void CRC32::ExtendByZeroesImpl(uint32_t* crc, size_t length,
+                               const uint32_t zeroes_table[256],
+                               const uint32_t poly_table[256]) {
+  if (length != 0) {
+    uint32_t l = *crc;
+    // For each ZEROES_BASE_LG bits in length
+    // (after the low-order bits have been removed)
+    // we lookup the appropriate polynomial in the zeroes_ array
+    // and do a polynomial long multiplication (mod the CRC polynomial)
+    // to extend the CRC by the appropriate number of bits.
+    for (int i = 0; length != 0;
+         i += ZEROES_BASE - 1, length >>= ZEROES_BASE_LG) {
+      int c = length & (ZEROES_BASE - 1);  // pick next ZEROES_BASE_LG bits
+      if (c != 0) {                        // if they are not zero,
+                                           // multiply by entry in table
+        // Build a table to aid in multiplying 2 bits at a time.
+        // It takes too long to build tables for more bits.
+        uint64_t m = zeroes_table[c + i - 1];
+        m <<= 1;
+        uint64_t m2 = m << 1;
+        uint64_t mtab[4] = {0, m, m2, m2 ^ m};
+
+        // Do the multiply one byte at a time.
+        uint64_t result = 0;
+        for (int x = 0; x < 32; x += 8) {
+          // The carry-less multiply.
+          result ^= mtab[l & 3] ^ (mtab[(l >> 2) & 3] << 2) ^
+                    (mtab[(l >> 4) & 3] << 4) ^ (mtab[(l >> 6) & 3] << 6);
+          l >>= 8;
+
+          // Reduce modulo the polynomial
+          result = (result >> 8) ^ poly_table[result & 0xff];
+        }
+        l = static_cast<uint32_t>(result);
+      }
+    }
+    *crc = l;
+  }
+}
+
+void CRC32::ExtendByZeroes(uint32_t* crc, size_t length) const {
+  return CRC32::ExtendByZeroesImpl(crc, length, zeroes_, table0_);
+}
+
+void CRC32::UnextendByZeroes(uint32_t* crc, size_t length) const {
+  // See the comment in CRC32::InitTables() for an explanation of the algorithm
+  // below.
+  *crc = ReverseBits(*crc);
+  ExtendByZeroesImpl(crc, length, reverse_zeroes_, reverse_table0_);
+  *crc = ReverseBits(*crc);
+}
+
+void CRC32::Scramble(uint32_t* crc) const {
+  // Rotate by near half the word size plus 1.  See the scramble comment in
+  // crc_internal.h for an explanation.
+  constexpr int scramble_rotate = (32 / 2) + 1;
+  *crc = RotateRight<uint32_t>(static_cast<unsigned int>(*crc + kScrambleLo),
+                               32, scramble_rotate) &
+         MaskOfLength<uint32_t>(32);
+}
+
+void CRC32::Unscramble(uint32_t* crc) const {
+  constexpr int scramble_rotate = (32 / 2) + 1;
+  uint64_t rotated = RotateRight<uint32_t>(static_cast<unsigned int>(*crc), 32,
+                                           32 - scramble_rotate);
+  *crc = (rotated - kScrambleLo) & MaskOfLength<uint32_t>(32);
+}
+
+// Constructor and destructor for base class CRC.
+CRC::~CRC() {}
+CRC::CRC() {}
+
+// The "constructor" for a CRC32C with a standard polynomial.
+CRC* CRC::Crc32c() {
+  static CRC* singleton = CRCImpl::NewInternal();
+  return singleton;
+}
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
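The comment in CRC32::InitTables() describes steps (A) and (B): extending a CRC by one zero bit and the exact inverse of that step, which is what makes UnextendByZeroes() possible. A minimal sketch of those two steps and their round trip, using the CRC32C polynomial and a CRC value taken from the tests above:

#include <cassert>
#include <cstdint>

constexpr uint32_t kCrc32cPoly = 0x82f63b78;

// Step (A): extend the CRC by a single zero bit.
uint32_t ExtendOneZeroBit(uint32_t crc) {
  bool low_bit_set = (crc & 1) != 0;
  crc >>= 1;
  if (low_bit_set) crc ^= kCrc32cPoly;
  return crc;
}

// Step (B): the inverse; the high bit after (A) records the low bit before it.
uint32_t UnextendOneZeroBit(uint32_t crc) {
  bool high_bit_set = (crc & 0x80000000u) != 0;
  if (high_bit_set) crc ^= kCrc32cPoly;
  crc <<= 1;
  if (high_bit_set) crc ^= 1;
  return crc;
}

int main() {
  uint32_t crc = 0xc99465aa;  // CRC32C of "hello world", per TEST(CRC32C, Compute)
  assert(UnextendOneZeroBit(ExtendOneZeroBit(crc)) == crc);
  return 0;
}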
diff --git a/abseil-cpp/absl/crc/internal/crc.h b/abseil-cpp/absl/crc/internal/crc.h
new file mode 100644
index 0000000..4efdd03
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc.h
@@ -0,0 +1,83 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC_H_
+#define ABSL_CRC_INTERNAL_CRC_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+// This class implements CRCs (aka Rabin Fingerprints).
+// Treats the input as a polynomial with coefficients in Z(2),
+// and finds the remainder when divided by a primitive polynomial
+// of the appropriate length.
+
+// A polynomial is represented by the bit pattern formed by its coefficients,
+// but with the highest order bit not stored.
+// The highest degree coefficient is stored in the lowest numbered bit
+// in the lowest addressed byte.   Thus, in what follows, the highest degree
+// coefficient that is stored is in the low order bit of "lo" or "*lo".
+
+// Hardware acceleration is used when available.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+class CRC {
+ public:
+  virtual ~CRC();
+
+  // If "*crc" is the CRC of bytestring A, place the CRC of
+  // the bytestring formed from the concatenation of A and the "length"
+  // bytes at "bytes" into "*crc".
+  virtual void Extend(uint32_t* crc, const void* bytes,
+                      size_t length) const = 0;
+
+  // Equivalent to Extend(crc, bytes, length) where "bytes"
+  // points to an array of "length" zero bytes.
+  virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0;
+
+  // Inverse operation of ExtendByZeroes.  If `crc` is the CRC value of a string
+  // ending in `length` zero bytes, this returns a CRC value of that string
+  // with those zero bytes removed.
+  virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0;
+
+  // Apply a non-linear transformation to "*crc" so that
+  // it is safe to CRC the result with the same polynomial without
+  // any reduction of error-detection ability in the outer CRC.
+  // Unscramble() performs the inverse transformation.
+  // It is strongly recommended that CRCs be scrambled before storage or
+  // transmission, and unscrambled at the other end before further manipulation.
+  virtual void Scramble(uint32_t* crc) const = 0;
+  virtual void Unscramble(uint32_t* crc) const = 0;
+
+  // Crc32c() returns the singleton implementation of CRC for the CRC32C
+  // polynomial.  Returns a handle that MUST NOT be destroyed with delete.
+  static CRC* Crc32c();
+
+ protected:
+  CRC();  // Clients may not call constructor; use Crc32c() instead.
+
+ private:
+  CRC(const CRC&) = delete;
+  CRC& operator=(const CRC&) = delete;
+};
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC_H_
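A short usage sketch of the internal interface documented above, showing how Crc32c(), Extend(), and Scramble() compose. This is only an illustration of the contract in the header, not how the public absl/crc/crc32c.h front end actually drives it.

#include <cstddef>
#include <cstdint>

#include "absl/crc/internal/crc.h"

uint32_t ChecksumForStorage(const char* data, size_t len) {
  // Singleton handle; must not be deleted.
  absl::crc_internal::CRC* crc32c = absl::crc_internal::CRC::Crc32c();
  uint32_t crc = 0;
  crc32c->Extend(&crc, data, len);
  crc32c->Scramble(&crc);  // Unscramble() must be applied before further use.
  return crc;
}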
diff --git a/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h b/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
new file mode 100644
index 0000000..39e53dd
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
@@ -0,0 +1,269 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_
+#define ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Many x86 and ARM machines have CRC acceleration hardware.
+// We can do a faster version of Extend() on such machines.
+// We define a translation layer for both x86 and ARM for ease of use and to
+// retain most of the performance gains.
+
+// This implementation requires 64-bit CRC instructions (part of SSE 4.2) and
+// PCLMULQDQ instructions. 32-bit builds with SSE 4.2 do exist, so the
+// __x86_64__ condition is necessary.
+#if defined(__x86_64__) && defined(__SSE4_2__) && defined(__PCLMUL__)
+
+#include <x86intrin.h>
+#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
+
+#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__)
+
+// MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ.
+#include <intrin.h>
+#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
+
+#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \
+    defined(__ARM_FEATURE_CRC32) && defined(ABSL_INTERNAL_HAVE_ARM_NEON) &&  \
+    defined(__ARM_FEATURE_CRYPTO)
+
+#include <arm_acle.h>
+#include <arm_neon.h>
+#define ABSL_CRC_INTERNAL_HAVE_ARM_SIMD
+
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \
+    defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD)
+
+#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD)
+using V128 = uint64x2_t;
+#else
+using V128 = __m128i;
+#endif
+
+// Starting with the initial value in |crc|, accumulates a CRC32 value for
+// unsigned integers of different sizes.
+uint32_t CRC32_u8(uint32_t crc, uint8_t v);
+
+uint32_t CRC32_u16(uint32_t crc, uint16_t v);
+
+uint32_t CRC32_u32(uint32_t crc, uint32_t v);
+
+uint32_t CRC32_u64(uint32_t crc, uint64_t v);
+
+// Loads 128 bits of integer data. |src| must be 16-byte aligned.
+V128 V128_Load(const V128* src);
+
+// Load 128 bits of integer data. |src| does not need to be aligned.
+V128 V128_LoadU(const V128* src);
+
+// Polynomially multiplies the high 64 bits of |l| and |r|.
+V128 V128_PMulHi(const V128 l, const V128 r);
+
+// Polynomially multiplies the low 64 bits of |l| and |r|.
+V128 V128_PMulLow(const V128 l, const V128 r);
+
+// Polynomially multiplies the low 64 bits of |r| and high 64 bits of |l|.
+V128 V128_PMul01(const V128 l, const V128 r);
+
+// Polynomially multiplies the low 64 bits of |l| and high 64 bits of |r|.
+V128 V128_PMul10(const V128 l, const V128 r);
+
+// Produces a XOR operation of |l| and |r|.
+V128 V128_Xor(const V128 l, const V128 r);
+
+// Produces an AND operation of |l| and |r|.
+V128 V128_And(const V128 l, const V128 r);
+
+// Sets two 64 bit integers to one 128 bit vector. The order is reversed.
+// dst[63:0] := |r|
+// dst[127:64] := |l|
+V128 V128_From2x64(const uint64_t l, const uint64_t r);
+
+// Shift |l| right by |imm| bytes while shifting in zeros.
+template <int imm>
+V128 V128_ShiftRight(const V128 l);
+
+// Extracts a 32-bit integer from |l|, selected with |imm|.
+template <int imm>
+int V128_Extract32(const V128 l);
+
+// Extracts the low 64 bits from V128.
+int64_t V128_Low64(const V128 l);
+
+// Left-shifts packed 64-bit integers in l by r.
+V128 V128_ShiftLeft64(const V128 l, const V128 r);
+
+#endif
+
+#if defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD)
+
+inline uint32_t CRC32_u8(uint32_t crc, uint8_t v) {
+  return _mm_crc32_u8(crc, v);
+}
+
+inline uint32_t CRC32_u16(uint32_t crc, uint16_t v) {
+  return _mm_crc32_u16(crc, v);
+}
+
+inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) {
+  return _mm_crc32_u32(crc, v);
+}
+
+inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) {
+  return static_cast<uint32_t>(_mm_crc32_u64(crc, v));
+}
+
+inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
+
+inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
+
+inline V128 V128_PMulHi(const V128 l, const V128 r) {
+  return _mm_clmulepi64_si128(l, r, 0x11);
+}
+
+inline V128 V128_PMulLow(const V128 l, const V128 r) {
+  return _mm_clmulepi64_si128(l, r, 0x00);
+}
+
+inline V128 V128_PMul01(const V128 l, const V128 r) {
+  return _mm_clmulepi64_si128(l, r, 0x01);
+}
+
+inline V128 V128_PMul10(const V128 l, const V128 r) {
+  return _mm_clmulepi64_si128(l, r, 0x10);
+}
+
+inline V128 V128_Xor(const V128 l, const V128 r) { return _mm_xor_si128(l, r); }
+
+inline V128 V128_And(const V128 l, const V128 r) { return _mm_and_si128(l, r); }
+
+inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
+  return _mm_set_epi64x(static_cast<int64_t>(l), static_cast<int64_t>(r));
+}
+
+template <int imm>
+inline V128 V128_ShiftRight(const V128 l) {
+  return _mm_srli_si128(l, imm);
+}
+
+template <int imm>
+inline int V128_Extract32(const V128 l) {
+  return _mm_extract_epi32(l, imm);
+}
+
+inline int64_t V128_Low64(const V128 l) { return _mm_cvtsi128_si64(l); }
+
+inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
+  return _mm_sll_epi64(l, r);
+}
+
+#elif defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD)
+
+inline uint32_t CRC32_u8(uint32_t crc, uint8_t v) { return __crc32cb(crc, v); }
+
+inline uint32_t CRC32_u16(uint32_t crc, uint16_t v) {
+  return __crc32ch(crc, v);
+}
+
+inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) {
+  return __crc32cw(crc, v);
+}
+
+inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) {
+  return __crc32cd(crc, v);
+}
+
+inline V128 V128_Load(const V128* src) {
+  return vld1q_u64(reinterpret_cast<const uint64_t*>(src));
+}
+
+inline V128 V128_LoadU(const V128* src) {
+  return vld1q_u64(reinterpret_cast<const uint64_t*>(src));
+}
+
+// Using inline assembly as clang does not generate the pmull2 instruction and
+// performance drops by 15-20%.
+// TODO(b/193678732): Investigate why the compiler decides not to generate
+// such instructions and why it becomes so much worse.
+inline V128 V128_PMulHi(const V128 l, const V128 r) {
+  uint64x2_t res;
+  __asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t"
+                       : "=w"(res)
+                       : "w"(l), "w"(r));
+  return res;
+}
+
+inline V128 V128_PMulLow(const V128 l, const V128 r) {
+  return reinterpret_cast<V128>(vmull_p64(
+      reinterpret_cast<poly64_t>(vget_low_p64(vreinterpretq_p64_u64(l))),
+      reinterpret_cast<poly64_t>(vget_low_p64(vreinterpretq_p64_u64(r)))));
+}
+
+inline V128 V128_PMul01(const V128 l, const V128 r) {
+  return reinterpret_cast<V128>(vmull_p64(
+      reinterpret_cast<poly64_t>(vget_high_p64(vreinterpretq_p64_u64(l))),
+      reinterpret_cast<poly64_t>(vget_low_p64(vreinterpretq_p64_u64(r)))));
+}
+
+inline V128 V128_PMul10(const V128 l, const V128 r) {
+  return reinterpret_cast<V128>(vmull_p64(
+      reinterpret_cast<poly64_t>(vget_low_p64(vreinterpretq_p64_u64(l))),
+      reinterpret_cast<poly64_t>(vget_high_p64(vreinterpretq_p64_u64(r)))));
+}
+
+inline V128 V128_Xor(const V128 l, const V128 r) { return veorq_u64(l, r); }
+
+inline V128 V128_And(const V128 l, const V128 r) { return vandq_u64(l, r); }
+
+inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
+  return vcombine_u64(vcreate_u64(r), vcreate_u64(l));
+}
+
+template <int imm>
+inline V128 V128_ShiftRight(const V128 l) {
+  return vreinterpretq_u64_s8(
+      vextq_s8(vreinterpretq_s8_u64(l), vdupq_n_s8(0), imm));
+}
+
+template <int imm>
+inline int V128_Extract32(const V128 l) {
+  return vgetq_lane_s32(vreinterpretq_s32_u64(l), imm);
+}
+
+inline int64_t V128_Low64(const V128 l) {
+  return vgetq_lane_s64(vreinterpretq_s64_u64(l), 0);
+}
+
+inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
+  return vshlq_u64(l, vreinterpretq_s64_u64(r));
+}
+
+#endif
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_
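A tiny check of the lane ordering documented on V128_From2x64() above (dst[63:0] == |r|, dst[127:64] == |l|). It only compiles on builds where one of the SIMD paths is enabled, and is a sketch rather than part of this library's tests.

#include <cassert>

#include "absl/crc/internal/crc32_x86_arm_combined_simd.h"

#if defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) || \
    defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD)
void CheckLaneOrdering() {
  using absl::crc_internal::V128;
  // The low 64 bits of the combined vector come from |r|.
  V128 v = absl::crc_internal::V128_From2x64(/*l=*/1, /*r=*/2);
  assert(absl::crc_internal::V128_Low64(v) == 2);
}
#endif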
diff --git a/abseil-cpp/absl/crc/internal/crc32c.h b/abseil-cpp/absl/crc/internal/crc32c.h
new file mode 100644
index 0000000..34027c5
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc32c.h
@@ -0,0 +1,39 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC32C_H_
+#define ABSL_CRC_INTERNAL_CRC32C_H_
+
+#include "absl/base/config.h"
+#include "absl/crc/crc32c.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+// Modifies a CRC32 value by removing `length` bytes with a value of 0 from
+// the end of the string.
+//
+// This is the inverse operation of ExtendCrc32cByZeroes().
+//
+// This operation has a runtime cost of O(log(`length`))
+//
+// Internal implementation detail, exposed for testing only.
+crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length);
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC32C_H_
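A minimal sketch of the inverse relationship documented above: extending a CRC by N zero bytes and then unextending by N zero bytes returns the original value. This mirrors what TEST(CRC32C, UnextendByZeroes) verifies earlier in this change.

#include <cassert>
#include <cstddef>

#include "absl/crc/crc32c.h"
#include "absl/crc/internal/crc32c.h"

void RoundTrip() {
  absl::crc32c_t crc = absl::ComputeCrc32c("hello world");
  constexpr size_t kZeroes = 4096;
  absl::crc32c_t extended = absl::ExtendCrc32cByZeroes(crc, kZeroes);
  // O(log(kZeroes)) per the comment above, regardless of how large kZeroes is.
  assert(absl::crc_internal::UnextendCrc32cByZeroes(extended, kZeroes) == crc);
}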
diff --git a/abseil-cpp/absl/crc/internal/crc32c_inline.h b/abseil-cpp/absl/crc/internal/crc32c_inline.h
new file mode 100644
index 0000000..6236c10
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc32c_inline.h
@@ -0,0 +1,72 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC32C_INLINE_H_
+#define ABSL_CRC_INTERNAL_CRC32C_INLINE_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/endian.h"
+#include "absl/crc/internal/crc32_x86_arm_combined_simd.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+// CRC32C implementation optimized for small inputs.
+// Either computes the CRC and returns true, or, if there is no hardware
+// support, does nothing and returns false.
+inline bool ExtendCrc32cInline(uint32_t* crc, const char* p, size_t n) {
+#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \
+    defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD)
+  constexpr uint32_t kCrc32Xor = 0xffffffffU;
+  *crc ^= kCrc32Xor;
+  if (n & 1) {
+    *crc = CRC32_u8(*crc, static_cast<uint8_t>(*p));
+    n--;
+    p++;
+  }
+  if (n & 2) {
+    *crc = CRC32_u16(*crc, absl::little_endian::Load16(p));
+    n -= 2;
+    p += 2;
+  }
+  if (n & 4) {
+    *crc = CRC32_u32(*crc, absl::little_endian::Load32(p));
+    n -= 4;
+    p += 4;
+  }
+  while (n) {
+    *crc = CRC32_u64(*crc, absl::little_endian::Load64(p));
+    n -= 8;
+    p += 8;
+  }
+  *crc ^= kCrc32Xor;
+  return true;
+#else
+  // No hardware support, signal the need to fallback.
+  static_cast<void>(crc);
+  static_cast<void>(p);
+  static_cast<void>(n);
+  return false;
+#endif  // defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) ||
+        // defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD)
+}
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC32C_INLINE_H_
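A sketch of the calling pattern the boolean return value above is designed for: try the small-input inline path first and fall back to a table-driven routine when there is no hardware support. ExtendCrc32cWithTables() is a hypothetical placeholder, not a function provided by this library.

#include <cstddef>
#include <cstdint>

#include "absl/crc/internal/crc32c_inline.h"

// Placeholder for a table-driven fallback (assumption, not part of this library).
uint32_t ExtendCrc32cWithTables(uint32_t crc, const char* p, size_t n);

uint32_t ExtendSmall(uint32_t crc, const char* p, size_t n) {
  if (absl::crc_internal::ExtendCrc32cInline(&crc, p, n)) {
    return crc;  // Hardware path handled it.
  }
  return ExtendCrc32cWithTables(crc, p, n);
}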
diff --git a/abseil-cpp/absl/crc/internal/crc_cord_state.cc b/abseil-cpp/absl/crc/internal/crc_cord_state.cc
new file mode 100644
index 0000000..28d04dc
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_cord_state.cc
@@ -0,0 +1,130 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/internal/crc_cord_state.h"
+
+#include <cassert>
+
+#include "absl/base/config.h"
+#include "absl/numeric/bits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+CrcCordState::RefcountedRep* CrcCordState::RefSharedEmptyRep() {
+  static CrcCordState::RefcountedRep* empty = new CrcCordState::RefcountedRep;
+
+  assert(empty->count.load(std::memory_order_relaxed) >= 1);
+  assert(empty->rep.removed_prefix.length == 0);
+  assert(empty->rep.prefix_crc.empty());
+
+  Ref(empty);
+  return empty;
+}
+
+CrcCordState::CrcCordState() : refcounted_rep_(new RefcountedRep) {}
+
+CrcCordState::CrcCordState(const CrcCordState& other)
+    : refcounted_rep_(other.refcounted_rep_) {
+  Ref(refcounted_rep_);
+}
+
+CrcCordState::CrcCordState(CrcCordState&& other)
+    : refcounted_rep_(other.refcounted_rep_) {
+  // Make `other` valid for use after move.
+  other.refcounted_rep_ = RefSharedEmptyRep();
+}
+
+CrcCordState& CrcCordState::operator=(const CrcCordState& other) {
+  if (this != &other) {
+    Unref(refcounted_rep_);
+    refcounted_rep_ = other.refcounted_rep_;
+    Ref(refcounted_rep_);
+  }
+  return *this;
+}
+
+CrcCordState& CrcCordState::operator=(CrcCordState&& other) {
+  if (this != &other) {
+    Unref(refcounted_rep_);
+    refcounted_rep_ = other.refcounted_rep_;
+    // Make `other` valid for use after move.
+    other.refcounted_rep_ = RefSharedEmptyRep();
+  }
+  return *this;
+}
+
+CrcCordState::~CrcCordState() {
+  Unref(refcounted_rep_);
+}
+
+crc32c_t CrcCordState::Checksum() const {
+  if (rep().prefix_crc.empty()) {
+    return absl::crc32c_t{0};
+  }
+  if (IsNormalized()) {
+    return rep().prefix_crc.back().crc;
+  }
+  return absl::RemoveCrc32cPrefix(
+      rep().removed_prefix.crc, rep().prefix_crc.back().crc,
+      rep().prefix_crc.back().length - rep().removed_prefix.length);
+}
+
+CrcCordState::PrefixCrc CrcCordState::NormalizedPrefixCrcAtNthChunk(
+    size_t n) const {
+  assert(n < NumChunks());
+  if (IsNormalized()) {
+    return rep().prefix_crc[n];
+  }
+  size_t length = rep().prefix_crc[n].length - rep().removed_prefix.length;
+  return PrefixCrc(length,
+                   absl::RemoveCrc32cPrefix(rep().removed_prefix.crc,
+                                            rep().prefix_crc[n].crc, length));
+}
+
+void CrcCordState::Normalize() {
+  if (IsNormalized() || rep().prefix_crc.empty()) {
+    return;
+  }
+
+  Rep* r = mutable_rep();
+  for (auto& prefix_crc : r->prefix_crc) {
+    size_t remaining = prefix_crc.length - r->removed_prefix.length;
+    prefix_crc.crc = absl::RemoveCrc32cPrefix(r->removed_prefix.crc,
+                                              prefix_crc.crc, remaining);
+    prefix_crc.length = remaining;
+  }
+  r->removed_prefix = PrefixCrc();
+}
+
+void CrcCordState::Poison() {
+  Rep* rep = mutable_rep();
+  if (NumChunks() > 0) {
+    for (auto& prefix_crc : rep->prefix_crc) {
+      // This is basically CRC32::Scramble().
+      uint32_t crc = static_cast<uint32_t>(prefix_crc.crc);
+      crc += 0x2e76e41b;
+      crc = absl::rotr(crc, 17);
+      prefix_crc.crc = crc32c_t{crc};
+    }
+  } else {
+    // Add a fake corrupt chunk.
+    rep->prefix_crc.emplace_back(0, crc32c_t{1});
+  }
+}
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/crc/internal/crc_cord_state.h b/abseil-cpp/absl/crc/internal/crc_cord_state.h
new file mode 100644
index 0000000..fbbb8c0
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_cord_state.h
@@ -0,0 +1,159 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_
+#define ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_
+
+#include <atomic>
+#include <cstddef>
+#include <deque>
+
+#include "absl/base/config.h"
+#include "absl/crc/crc32c.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+// CrcCordState is a copy-on-write class that holds the chunked CRC32C data
+// that allows CrcCord to perform efficient substring operations. CrcCordState
+// is used as a member variable in CrcCord. When a CrcCord is converted to a
+// Cord, the CrcCordState is shallow-copied into the root node of the Cord. If
+// the converted Cord is modified outside of CrcCord, the CrcCordState is
+// discarded from the Cord. If the Cord is converted back to a CrcCord, and the
+// Cord is still carrying the CrcCordState in its root node, the CrcCord can
+// re-use the CrcCordState, making the construction of the CrcCord cheap.
+//
+// CrcCordState does not try to encapsulate the CRC32C state (CrcCord requires
+// knowledge of how CrcCordState represents the CRC32C state). It does
+// encapsulate the copy-on-write nature of the state.
+class CrcCordState {
+ public:
+  // Constructors.
+  CrcCordState();
+  CrcCordState(const CrcCordState&);
+  CrcCordState(CrcCordState&&);
+
+  // Destructor. Atomically unreferences the data.
+  ~CrcCordState();
+
+  // Copy and move operators.
+  CrcCordState& operator=(const CrcCordState&);
+  CrcCordState& operator=(CrcCordState&&);
+
+  // A (length, crc) pair.
+  struct PrefixCrc {
+    PrefixCrc() = default;
+    PrefixCrc(size_t length_arg, absl::crc32c_t crc_arg)
+        : length(length_arg), crc(crc_arg) {}
+
+    size_t length = 0;
+
+    // TODO(absl-team): Memory stomping often zeros out memory. If this struct
+    // gets overwritten, we could end up with {0, 0}, which is the correct CRC
+    // for a string of length 0. Consider storing a scrambled value and
+    // unscrambling it before verifying it.
+    absl::crc32c_t crc = absl::crc32c_t{0};
+  };
+
+  // The representation of the chunked CRC32C data.
+  struct Rep {
+    // `removed_prefix` is the crc and length of any prefix that has been
+    // removed from the Cord (for example, by calling
+    // `CrcCord::RemovePrefix()`). To get the checksum of any prefix of the
+    // cord, this value must be subtracted from `prefix_crc`. See `Checksum()`
+    // for an example.
+    //
+    // CrcCordState is said to be "normalized" if removed_prefix.length == 0.
+    PrefixCrc removed_prefix;
+
+    // A deque of (length, crc) pairs, representing length and crc of a prefix
+    // of the Cord, before removed_prefix has been subtracted. The lengths of
+    // the prefixes are stored in increasing order. If the Cord is not empty,
+    // the last value in the deque contains the CRC32C of the entire Cord
+    // once removed_prefix has been subtracted from it.
+    std::deque<PrefixCrc> prefix_crc;
+  };
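+
+  // Example (mirrors the logic in Checksum(); illustrative only): for a Rep
+  // with removed_prefix = {500, p} and prefix_crc = {{1000, c1}, {2000, c2}},
+  // the CRC of the whole cord (1500 remaining bytes) is
+  //   RemoveCrc32cPrefix(p, c2, 2000 - 500).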
+
+  // Returns a reference to the representation of the chunked CRC32C data.
+  const Rep& rep() const { return refcounted_rep_->rep; }
+
+  // Returns a mutable reference to the representation of the chunked CRC32C
+  // data. Calling this function will copy the data if another instance also
+  // holds a reference to the data, so call rep() instead when the data does
+  // not need to be mutated.
+  Rep* mutable_rep() {
+    if (refcounted_rep_->count.load(std::memory_order_acquire) != 1) {
+      RefcountedRep* copy = new RefcountedRep;
+      copy->rep = refcounted_rep_->rep;
+      Unref(refcounted_rep_);
+      refcounted_rep_ = copy;
+    }
+    return &refcounted_rep_->rep;
+  }
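+
+  // Copy-on-write in practice (illustrative sketch, not part of the API):
+  //
+  //   CrcCordState a;
+  //   CrcCordState b = a;  // a and b share one RefcountedRep.
+  //   b.mutable_rep();     // count != 1, so b now owns an unshared copy.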
+
+  // Returns the CRC32C of the entire Cord.
+  absl::crc32c_t Checksum() const;
+
+  // Returns true if the chunked CRC32C cache is normalized.
+  bool IsNormalized() const { return rep().removed_prefix.length == 0; }
+
+  // Normalizes the chunked CRC32C checksum cache by subtracting any removed
+  // prefix from the chunks.
+  void Normalize();
+
+  // Returns the number of cached chunks.
+  size_t NumChunks() const { return rep().prefix_crc.size(); }
+
+  // Helper that returns the (length, crc) of the `n`-th cached chunk.
+  PrefixCrc NormalizedPrefixCrcAtNthChunk(size_t n) const;
+
+  // Poisons all chunks so that Checksum() will be incorrect with high
+  // probability.
+  void Poison();
+
+ private:
+  struct RefcountedRep {
+    std::atomic<int32_t> count{1};
+    Rep rep;
+  };
+
+  // Adds a reference to the shared global empty `RefcountedRep`, and returns a
+  // pointer to the `RefcountedRep`. This is an optimization to avoid unneeded
+  // allocations when the allocation is unlikely to ever be used. The returned
+  // pointer can be `Unref()`ed when it is no longer needed.  Since the returned
+  // instance will always have a reference counter greater than 1, attempts to
+  // modify it (by calling `mutable_rep()`) will create a new unshared copy.
+  static RefcountedRep* RefSharedEmptyRep();
+
+  static void Ref(RefcountedRep* r) {
+    assert(r != nullptr);
+    r->count.fetch_add(1, std::memory_order_relaxed);
+  }
+
+  static void Unref(RefcountedRep* r) {
+    assert(r != nullptr);
+    if (r->count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
+      delete r;
+    }
+  }
+
+  RefcountedRep* refcounted_rep_;
+};
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_
diff --git a/abseil-cpp/absl/crc/internal/crc_cord_state_test.cc b/abseil-cpp/absl/crc/internal/crc_cord_state_test.cc
new file mode 100644
index 0000000..e2c8e3c
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_cord_state_test.cc
@@ -0,0 +1,124 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/internal/crc_cord_state.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/crc/crc32c.h"
+
+namespace {
+
+TEST(CrcCordState, Default) {
+  absl::crc_internal::CrcCordState state;
+  EXPECT_TRUE(state.IsNormalized());
+  EXPECT_EQ(state.Checksum(), absl::crc32c_t{0});
+  state.Normalize();
+  EXPECT_EQ(state.Checksum(), absl::crc32c_t{0});
+}
+
+TEST(CrcCordState, Normalize) {
+  absl::crc_internal::CrcCordState state;
+  auto* rep = state.mutable_rep();
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000}));
+  rep->removed_prefix =
+      absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500});
+
+  // The removed_prefix means state is not normalized.
+  EXPECT_FALSE(state.IsNormalized());
+
+  absl::crc32c_t crc = state.Checksum();
+  state.Normalize();
+  EXPECT_TRUE(state.IsNormalized());
+
+  // The checksum should not change as a result of calling Normalize().
+  EXPECT_EQ(state.Checksum(), crc);
+  EXPECT_EQ(rep->removed_prefix.length, 0);
+}
+
+TEST(CrcCordState, Copy) {
+  absl::crc_internal::CrcCordState state;
+  auto* rep = state.mutable_rep();
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
+
+  absl::crc_internal::CrcCordState copy = state;
+
+  EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000});
+  EXPECT_EQ(copy.Checksum(), absl::crc32c_t{1000});
+}
+
+TEST(CrcCordState, UnsharedSelfCopy) {
+  absl::crc_internal::CrcCordState state;
+  auto* rep = state.mutable_rep();
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
+
+  const absl::crc_internal::CrcCordState& ref = state;
+  state = ref;
+
+  EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000});
+}
+
+TEST(CrcCordState, Move) {
+  absl::crc_internal::CrcCordState state;
+  auto* rep = state.mutable_rep();
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
+
+  absl::crc_internal::CrcCordState moved = std::move(state);
+  EXPECT_EQ(moved.Checksum(), absl::crc32c_t{1000});
+}
+
+TEST(CrcCordState, UnsharedSelfMove) {
+  absl::crc_internal::CrcCordState state;
+  auto* rep = state.mutable_rep();
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
+
+  absl::crc_internal::CrcCordState& ref = state;
+  state = std::move(ref);
+
+  EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000});
+}
+
+TEST(CrcCordState, PoisonDefault) {
+  absl::crc_internal::CrcCordState state;
+  state.Poison();
+  EXPECT_NE(state.Checksum(), absl::crc32c_t{0});
+}
+
+TEST(CrcCordState, PoisonData) {
+  absl::crc_internal::CrcCordState state;
+  auto* rep = state.mutable_rep();
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
+  rep->prefix_crc.push_back(
+      absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000}));
+  rep->removed_prefix =
+      absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500});
+
+  absl::crc32c_t crc = state.Checksum();
+  state.Poison();
+  EXPECT_NE(state.Checksum(), crc);
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/crc/internal/crc_internal.h b/abseil-cpp/absl/crc/internal/crc_internal.h
new file mode 100644
index 0000000..4d3582d
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_internal.h
@@ -0,0 +1,177 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC_INTERNAL_H_
+#define ABSL_CRC_INTERNAL_CRC_INTERNAL_H_
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/crc/internal/crc.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace crc_internal {
+
+// Prefetch constants used in some Extend() implementations
+constexpr int kPrefetchHorizon = ABSL_CACHELINE_SIZE * 4;  // Prefetch this far
+// Shorter prefetch distance for smaller buffers
+constexpr int kPrefetchHorizonMedium = ABSL_CACHELINE_SIZE * 1;
+static_assert(kPrefetchHorizon >= 64, "CRCPrefetchHorizon less than loop len");
+
+// We require the Scramble() function:
+//  - to be reversible (Unscramble() must exist)
+//  - to be non-linear in the polynomial's Galois field (so the CRC of a
+//    scrambled CRC is not linearly affected by the scrambled CRC, even if
+//    using the same polynomial)
+//  - not to be its own inverse.  Preferably, if X=Scramble^N(X) and N!=0, then
+//    N is large.
+//  - to be fast.
+//  - not to change once defined.
+// We introduce non-linearity in two ways:
+//     Addition of a constant.
+//         - The carries introduce non-linearity; we use bits of an irrational
+//           (phi) to make it unlikely that we introduce no carries.
+//     Rotate by a constant number of bits.
+//         - We use floor(degree/2)+1, which does not divide the degree, and
+//           splits the bits nearly evenly, which makes it less likely the
+//           halves will be the same or one will be all zeroes.
+// We do both things to improve the chances of non-linearity in the face of
+// bit patterns with low numbers of bits set, while still being fast.
+// Below is the constant that we add.  The bits are the first 128 bits of the
+// fractional part of phi, with a 1 ored into the bottom bit to maximize the
+// cycle length of repeated adds.
+constexpr uint64_t kScrambleHi = (static_cast<uint64_t>(0x4f1bbcdcU) << 32) |
+                                 static_cast<uint64_t>(0xbfa53e0aU);
+constexpr uint64_t kScrambleLo = (static_cast<uint64_t>(0xf9ce6030U) << 32) |
+                                 static_cast<uint64_t>(0x2e76e41bU);
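+
+// A rough sketch of the resulting 32-bit scramble (the real Scramble() and
+// Unscramble() live in the CRC implementations; compare
+// CrcCordState::Poison(), which uses the same constant and rotation):
+//
+//   uint32_t Scramble32(uint32_t crc) {
+//     crc += static_cast<uint32_t>(kScrambleLo);  // add constant bits of phi
+//     return absl::rotr(crc, 32 / 2 + 1);         // rotate by floor(w/2) + 1
+//   }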
+
+class CRCImpl : public CRC {  // Implementation of the abstract class CRC
+ public:
+  using Uint32By256 = uint32_t[256];
+
+  CRCImpl() = default;
+  ~CRCImpl() override = default;
+
+  // The internal version of CRC::New().
+  static CRCImpl* NewInternal();
+
+  // Fill in a table for updating a CRC by one word of 'word_size' bytes.
+  // [last_lo, last_hi] contains the answer if the last bit in the word
+  // is set.
+  static void FillWordTable(uint32_t poly, uint32_t last, int word_size,
+                            Uint32By256* t);
+
+  // Build the table for extending by zeroes, returning the number of entries.
+  // For a in {1, 2, ..., ZEROES_BASE-1}, b in {0, 1, 2, 3, ...},
+  // entry j=a-1+(ZEROES_BASE-1)*b
+  // contains a polynomial Pi such that multiplying
+  // a CRC by Pi mod P, where P is the CRC polynomial, is equivalent to
+  // appending a*2**(ZEROES_BASE_LG*b) zero bytes to the original string.
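+  // For example, extending a CRC by a = 5, b = 2, i.e. by
+  // 5 * 2**(ZEROES_BASE_LG * 2) zero bytes, uses the entry at
+  // j = 4 + (ZEROES_BASE - 1) * 2 (purely an illustration of the indexing
+  // formula above).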
+  static int FillZeroesTable(uint32_t poly, Uint32By256* t);
+
+  virtual void InitTables() = 0;
+
+ private:
+  CRCImpl(const CRCImpl&) = delete;
+  CRCImpl& operator=(const CRCImpl&) = delete;
+};
+
+// This is the 32-bit implementation.  It handles all sizes from 8 to 32.
+class CRC32 : public CRCImpl {
+ public:
+  CRC32() = default;
+  ~CRC32() override = default;
+
+  void Extend(uint32_t* crc, const void* bytes, size_t length) const override;
+  void ExtendByZeroes(uint32_t* crc, size_t length) const override;
+  void Scramble(uint32_t* crc) const override;
+  void Unscramble(uint32_t* crc) const override;
+  void UnextendByZeroes(uint32_t* crc, size_t length) const override;
+
+  void InitTables() override;
+
+ private:
+  // Common implementation guts for ExtendByZeroes and UnextendByZeroes().
+  //
+  // zeroes_table is a table as returned by FillZeroesTable(), containing
+  // polynomials representing CRCs of strings-of-zeros of various lengths,
+  // and which can be combined by polynomial multiplication.  poly_table is
+  // a table of CRC byte extension values.  These tables are determined by
+  // the generator polynomial.
+  //
+  // These will be set to reverse_zeroes_ and reverse_table0_ for Unextend, and
+  // CRC32::zeroes_ and CRC32::table0_ for Extend.
+  static void ExtendByZeroesImpl(uint32_t* crc, size_t length,
+                                 const uint32_t zeroes_table[256],
+                                 const uint32_t poly_table[256]);
+
+  uint32_t table0_[256];  // table of byte extensions
+  uint32_t zeroes_[256];  // table of zero extensions
+
+  // table of 4-byte extensions shifted by 12 bytes of zeroes
+  uint32_t table_[4][256];
+
+  // Reverse lookup tables, using the alternate polynomial used by
+  // UnextendByZeroes().
+  uint32_t reverse_table0_[256];  // table of reverse byte extensions
+  uint32_t reverse_zeroes_[256];  // table of reverse zero extensions
+
+  CRC32(const CRC32&) = delete;
+  CRC32& operator=(const CRC32&) = delete;
+};
+
+// Helpers
+
+// Return a bit mask containing len 1-bits.
+// Requires 0 < len <= sizeof(T)
+template <typename T>
+T MaskOfLength(int len) {
+  // shift 2 by len-1 rather than 1 by len because shifts of wordsize
+  // are undefined.
+  return (T(2) << (len - 1)) - 1;
+}
+
+// Rotate low-order "width" bits of "in" right by "r" bits,
+// setting other bits in word to arbitrary values.
+template <typename T>
+T RotateRight(T in, int width, int r) {
+  return (in << (width - r)) | ((in >> r) & MaskOfLength<T>(width - r));
+}
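+
+// For instance (illustrative only): MaskOfLength<uint32_t>(8) == 0xFF, and
+// RotateRight<uint32_t>(0x01, 8, 1) has 0x80 in its low 8 bits (higher bits
+// are unspecified, as documented above).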
+
+// RoundUp<N>(p) returns the lowest address >= p aligned to an N-byte
+// boundary.  Requires that N is a power of 2.
+template <int alignment>
+const uint8_t* RoundUp(const uint8_t* p) {
+  static_assert((alignment & (alignment - 1)) == 0, "alignment is not 2^n");
+  constexpr uintptr_t mask = alignment - 1;
+  const uintptr_t as_uintptr = reinterpret_cast<uintptr_t>(p);
+  return reinterpret_cast<const uint8_t*>((as_uintptr + mask) & ~mask);
+}
+
+// Return a newly created CRC32AcceleratedX86ARMCombined if we can use Intel's
+// or ARM's CRC acceleration for a given polynomial.  Return nullptr otherwise.
+CRCImpl* TryNewCRC32AcceleratedX86ARMCombined();
+
+// Return all possible hardware accelerated implementations. For testing only.
+std::vector<std::unique_ptr<CRCImpl>> NewCRC32AcceleratedX86ARMCombinedAll();
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC_INTERNAL_H_
diff --git a/abseil-cpp/absl/crc/internal/crc_memcpy.h b/abseil-cpp/absl/crc/internal/crc_memcpy.h
new file mode 100644
index 0000000..4909d43
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_memcpy.h
@@ -0,0 +1,119 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_CRC_MEMCPY_H_
+#define ABSL_CRC_INTERNAL_CRC_MEMCPY_H_
+
+#include <cstddef>
+#include <memory>
+
+#include "absl/base/config.h"
+#include "absl/crc/crc32c.h"
+
+// Defined if the class AcceleratedCrcMemcpyEngine exists.
+#if defined(__x86_64__) && defined(__SSE4_2__)
+#define ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE 1
+#elif defined(_MSC_VER) && defined(__AVX__)
+#define ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE 1
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+class CrcMemcpyEngine {
+ public:
+  virtual ~CrcMemcpyEngine() = default;
+
+  virtual crc32c_t Compute(void* __restrict dst, const void* __restrict src,
+                           std::size_t length, crc32c_t initial_crc) const = 0;
+
+ protected:
+  CrcMemcpyEngine() = default;
+};
+
+class CrcMemcpy {
+ public:
+  static crc32c_t CrcAndCopy(void* __restrict dst, const void* __restrict src,
+                             std::size_t length,
+                             crc32c_t initial_crc = crc32c_t{0},
+                             bool non_temporal = false) {
+    static const ArchSpecificEngines engines = GetArchSpecificEngines();
+    auto* engine = non_temporal ? engines.non_temporal : engines.temporal;
+    return engine->Compute(dst, src, length, initial_crc);
+  }
+
+  // For testing only: get an architecture-specific engine for tests.
+  static std::unique_ptr<CrcMemcpyEngine> GetTestEngine(int vector,
+                                                        int integer);
+
+ private:
+  struct ArchSpecificEngines {
+    CrcMemcpyEngine* temporal;
+    CrcMemcpyEngine* non_temporal;
+  };
+
+  static ArchSpecificEngines GetArchSpecificEngines();
+};
+
+// Fallback CRC-memcpy engine.
+class FallbackCrcMemcpyEngine : public CrcMemcpyEngine {
+ public:
+  FallbackCrcMemcpyEngine() = default;
+  FallbackCrcMemcpyEngine(const FallbackCrcMemcpyEngine&) = delete;
+  FallbackCrcMemcpyEngine operator=(const FallbackCrcMemcpyEngine&) = delete;
+
+  crc32c_t Compute(void* __restrict dst, const void* __restrict src,
+                   std::size_t length, crc32c_t initial_crc) const override;
+};
+
+// CRC Non-Temporal-Memcpy engine.
+class CrcNonTemporalMemcpyEngine : public CrcMemcpyEngine {
+ public:
+  CrcNonTemporalMemcpyEngine() = default;
+  CrcNonTemporalMemcpyEngine(const CrcNonTemporalMemcpyEngine&) = delete;
+  CrcNonTemporalMemcpyEngine operator=(const CrcNonTemporalMemcpyEngine&) =
+      delete;
+
+  crc32c_t Compute(void* __restrict dst, const void* __restrict src,
+                   std::size_t length, crc32c_t initial_crc) const override;
+};
+
+// CRC Non-Temporal-Memcpy AVX engine.
+class CrcNonTemporalMemcpyAVXEngine : public CrcMemcpyEngine {
+ public:
+  CrcNonTemporalMemcpyAVXEngine() = default;
+  CrcNonTemporalMemcpyAVXEngine(const CrcNonTemporalMemcpyAVXEngine&) = delete;
+  CrcNonTemporalMemcpyAVXEngine operator=(
+      const CrcNonTemporalMemcpyAVXEngine&) = delete;
+
+  crc32c_t Compute(void* __restrict dst, const void* __restrict src,
+                   std::size_t length, crc32c_t initial_crc) const override;
+};
+
+// Copy source to destination and return the CRC32C of the data copied.  If an
+// accelerated version is available, use the accelerated version, otherwise use
+// the generic fallback version.
+inline crc32c_t Crc32CAndCopy(void* __restrict dst, const void* __restrict src,
+                              std::size_t length,
+                              crc32c_t initial_crc = crc32c_t{0},
+                              bool non_temporal = false) {
+  return CrcMemcpy::CrcAndCopy(dst, src, length, initial_crc, non_temporal);
+}
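+
+// Usage sketch (illustrative only; assumes `src` points to at least
+// sizeof(dst) readable bytes):
+//
+//   char dst[4096];
+//   absl::crc32c_t crc =
+//       absl::crc_internal::Crc32CAndCopy(dst, src, sizeof(dst));
+//   // `dst` now holds the copied bytes and `crc` is their CRC32C.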
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_CRC_MEMCPY_H_
diff --git a/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc b/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc
new file mode 100644
index 0000000..15b4b05
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc
@@ -0,0 +1,75 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+#include <memory>
+
+#include "absl/base/config.h"
+#include "absl/crc/crc32c.h"
+#include "absl/crc/internal/crc_memcpy.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+absl::crc32c_t FallbackCrcMemcpyEngine::Compute(void* __restrict dst,
+                                                const void* __restrict src,
+                                                std::size_t length,
+                                                crc32c_t initial_crc) const {
+  constexpr size_t kBlockSize = 8192;
+  absl::crc32c_t crc = initial_crc;
+
+  const char* src_bytes = reinterpret_cast<const char*>(src);
+  char* dst_bytes = reinterpret_cast<char*>(dst);
+
+  // Copy + CRC loop - run 8k chunks until we are out of full chunks.  CRC
+  // then copy was found to be slightly more efficient in our test cases.
+  std::size_t offset = 0;
+  for (; offset + kBlockSize < length; offset += kBlockSize) {
+    crc = absl::ExtendCrc32c(crc,
+                             absl::string_view(src_bytes + offset, kBlockSize));
+    memcpy(dst_bytes + offset, src_bytes + offset, kBlockSize);
+  }
+
+  // Save some work if length is 0.
+  if (offset < length) {
+    std::size_t final_copy_size = length - offset;
+    crc = absl::ExtendCrc32c(
+        crc, absl::string_view(src_bytes + offset, final_copy_size));
+    memcpy(dst_bytes + offset, src_bytes + offset, final_copy_size);
+  }
+
+  return crc;
+}
+
+// Compile the following only if we don't have an accelerated implementation.
+#ifndef ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE
+
+CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
+  CrcMemcpy::ArchSpecificEngines engines;
+  engines.temporal = new FallbackCrcMemcpyEngine();
+  engines.non_temporal = new FallbackCrcMemcpyEngine();
+  return engines;
+}
+
+std::unique_ptr<CrcMemcpyEngine> CrcMemcpy::GetTestEngine(int /*vector*/,
+                                                          int /*integer*/) {
+  return std::make_unique<FallbackCrcMemcpyEngine>();
+}
+
+#endif  // ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc b/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc
new file mode 100644
index 0000000..bbdcd20
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc
@@ -0,0 +1,169 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/internal/crc_memcpy.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/crc/crc32c.h"
+#include "absl/memory/memory.h"
+#include "absl/random/distributions.h"
+#include "absl/random/random.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+namespace {
+
+enum CrcEngine {
+  X86 = 0,
+  NONTEMPORAL = 1,
+  FALLBACK = 2,
+};
+
+// Correctness tests:
+// - Every source/destination byte alignment 0-15, every size 0-511 bytes
+// - Arbitrarily aligned source, large size
+template <size_t max_size>
+class CrcMemcpyTest : public testing::Test {
+ protected:
+  CrcMemcpyTest() {
+    source_ = std::make_unique<char[]>(kSize);
+    destination_ = std::make_unique<char[]>(kSize);
+  }
+  static constexpr size_t kAlignment = 16;
+  static constexpr size_t kMaxCopySize = max_size;
+  static constexpr size_t kSize = kAlignment + kMaxCopySize;
+  std::unique_ptr<char[]> source_;
+  std::unique_ptr<char[]> destination_;
+
+  absl::BitGen gen_;
+};
+
+// The small test is slightly larger than 4096 bytes to allow coverage of the
+// "large" copy function.  Exercising all code paths in that function takes
+// around 256 consecutive sizes (every possible tail value plus 0-2 small copy
+// loops after the main block), so testing sizes from 4096 to 4500 covers all
+// of those code paths multiple times.
+typedef CrcMemcpyTest<4500> CrcSmallTest;
+typedef CrcMemcpyTest<(1 << 24)> CrcLargeTest;
+// Parametrize the small test so that it can be done with all configurations.
+template <typename ParamsT>
+class x86ParamTestTemplate : public CrcSmallTest,
+                             public ::testing::WithParamInterface<ParamsT> {
+ protected:
+  x86ParamTestTemplate() {
+    if (GetParam().crc_engine_selector == FALLBACK) {
+      engine_ = std::make_unique<absl::crc_internal::FallbackCrcMemcpyEngine>();
+    } else if (GetParam().crc_engine_selector == NONTEMPORAL) {
+      engine_ =
+          std::make_unique<absl::crc_internal::CrcNonTemporalMemcpyEngine>();
+    } else {
+      engine_ = absl::crc_internal::CrcMemcpy::GetTestEngine(
+          GetParam().vector_lanes, GetParam().integer_lanes);
+    }
+  }
+
+  // Convenience method.
+  ParamsT GetParam() const {
+    return ::testing::WithParamInterface<ParamsT>::GetParam();
+  }
+
+  std::unique_ptr<absl::crc_internal::CrcMemcpyEngine> engine_;
+};
+struct TestParams {
+  CrcEngine crc_engine_selector = X86;
+  int vector_lanes = 0;
+  int integer_lanes = 0;
+};
+using x86ParamTest = x86ParamTestTemplate<TestParams>;
+// SmallCorrectness is designed to exercise every possible set of code paths
+// in the memcpy code, not including the loop.
+TEST_P(x86ParamTest, SmallCorrectnessCheckSourceAlignment) {
+  constexpr size_t kTestSizes[] = {0, 100, 255, 512, 1024, 4000, kMaxCopySize};
+
+  for (size_t source_alignment = 0; source_alignment < kAlignment;
+       source_alignment++) {
+    for (auto size : kTestSizes) {
+      char* base_data = static_cast<char*>(source_.get()) + source_alignment;
+      for (size_t i = 0; i < size; i++) {
+        *(base_data + i) =
+            static_cast<char>(absl::Uniform<unsigned char>(gen_));
+      }
+      absl::crc32c_t initial_crc =
+          absl::crc32c_t{absl::Uniform<uint32_t>(gen_)};
+      absl::crc32c_t experiment_crc =
+          engine_->Compute(destination_.get(), source_.get() + source_alignment,
+                           size, initial_crc);
+      // Check the memory region to make sure it is the same
+      int mem_comparison =
+          memcmp(destination_.get(), source_.get() + source_alignment, size);
+      SCOPED_TRACE(absl::StrCat("Error in memcpy of size: ", size,
+                                " with source alignment: ", source_alignment));
+      ASSERT_EQ(mem_comparison, 0);
+      absl::crc32c_t baseline_crc = absl::ExtendCrc32c(
+          initial_crc,
+          absl::string_view(
+              static_cast<char*>(source_.get()) + source_alignment, size));
+      ASSERT_EQ(baseline_crc, experiment_crc);
+    }
+  }
+}
+
+TEST_P(x86ParamTest, SmallCorrectnessCheckDestAlignment) {
+  constexpr size_t kTestSizes[] = {0, 100, 255, 512, 1024, 4000, kMaxCopySize};
+
+  for (size_t dest_alignment = 0; dest_alignment < kAlignment;
+       dest_alignment++) {
+    for (auto size : kTestSizes) {
+      char* base_data = static_cast<char*>(source_.get());
+      for (size_t i = 0; i < size; i++) {
+        *(base_data + i) =
+            static_cast<char>(absl::Uniform<unsigned char>(gen_));
+      }
+      absl::crc32c_t initial_crc =
+          absl::crc32c_t{absl::Uniform<uint32_t>(gen_)};
+      absl::crc32c_t experiment_crc =
+          engine_->Compute(destination_.get() + dest_alignment, source_.get(),
+                           size, initial_crc);
+      // Check the memory region to make sure it is the same
+      int mem_comparison =
+          memcmp(destination_.get() + dest_alignment, source_.get(), size);
+      SCOPED_TRACE(absl::StrCat("Error in memcpy of size: ", size,
+                                " with dest alignment: ", dest_alignment));
+      ASSERT_EQ(mem_comparison, 0);
+      absl::crc32c_t baseline_crc = absl::ExtendCrc32c(
+          initial_crc,
+          absl::string_view(static_cast<char*>(source_.get()), size));
+      ASSERT_EQ(baseline_crc, experiment_crc);
+    }
+  }
+}
+
+INSTANTIATE_TEST_SUITE_P(x86ParamTest, x86ParamTest,
+                         ::testing::Values(
+                             // Tests for configurations that may occur in prod.
+                             TestParams{X86, 3, 0}, TestParams{X86, 1, 2},
+                             // Fallback test.
+                             TestParams{FALLBACK, 0, 0},
+                             // Non Temporal
+                             TestParams{NONTEMPORAL, 0, 0}));
+
+}  // namespace
diff --git a/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc b/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc
new file mode 100644
index 0000000..d42b08d
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc
@@ -0,0 +1,432 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Simultaneous memcopy and CRC-32C for x86-64.  Uses integer registers because
+// XMM registers do not support the CRC instruction (yet).  While copying,
+// compute the running CRC of the data being copied.
+//
+// It is assumed that any CPU running this code has SSE4.2 instructions
+// available (for CRC32C).  This file will do nothing if that is not true.
+//
+// The CRC instruction has a 3-cycle latency, and we are stressing the ALU ports
+// here (unlike a traditional memcopy, which has almost no ALU use), so we will
+// need to copy in such a way that the CRC unit is used efficiently. We have two
+// regimes in this code:
+//  1. For operations of size < kCrcSmallSize, do the CRC then the memcpy
+//  2. For operations of size > kCrcSmallSize:
+//      a) compute an initial CRC + copy on a small amount of data to align the
+//         destination pointer on a 16-byte boundary.
+//      b) Split the data into 3 main regions and a tail (smaller than 48 bytes)
+//      c) Do the copy and CRC of the 3 main regions, interleaving (start with
+//         full cache line copies for each region, then move to single 16 byte
+//         pieces per region).
+//      d) Combine the CRCs with CRC32C::Concat.
+//      e) Copy the tail and extend the CRC with the CRC of the tail.
+// This method is not ideal for op sizes between ~1k and ~8k because CRC::Concat
+// takes a significant amount of time.  A medium-sized approach could be added
+// using 3 CRCs over fixed-size blocks where the zero-extensions required for
+// CRC32C::Concat can be precomputed.
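+//
+// As a worked illustration of regime 2 (ignoring the initial alignment copy),
+// a 10000-byte operation with kRegions == 3 and 16-byte blocks gives
+// copy_rounds = 10000 / 48 = 208 rounds, region_size = 208 * 16 = 3328 bytes
+// per region, and a 10000 - 3 * 3328 = 16-byte tail.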
+
+#ifdef __SSE4_2__
+#include <immintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/optimization.h"
+#include "absl/base/prefetch.h"
+#include "absl/crc/crc32c.h"
+#include "absl/crc/internal/cpu_detect.h"
+#include "absl/crc/internal/crc_memcpy.h"
+#include "absl/strings/string_view.h"
+
+#ifdef ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+namespace {
+
+inline crc32c_t ShortCrcCopy(char* dst, const char* src, std::size_t length,
+                             crc32c_t crc) {
+  // Small copy: just go 1 byte at a time; being nice to the branch predictor
+  // is more important here than anything else.
+  uint32_t crc_uint32 = static_cast<uint32_t>(crc);
+  for (std::size_t i = 0; i < length; i++) {
+    uint8_t data = *reinterpret_cast<const uint8_t*>(src);
+    crc_uint32 = _mm_crc32_u8(crc_uint32, data);
+    *reinterpret_cast<uint8_t*>(dst) = data;
+    ++src;
+    ++dst;
+  }
+  return crc32c_t{crc_uint32};
+}
+
+constexpr size_t kIntLoadsPerVec = sizeof(__m128i) / sizeof(uint64_t);
+
+// Common function for copying the tails of multiple large regions.
+template <size_t vec_regions, size_t int_regions>
+inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src,
+                          size_t region_size, size_t copy_rounds) {
+  std::array<__m128i, vec_regions> data;
+  std::array<uint64_t, kIntLoadsPerVec * int_regions> int_data;
+
+  while (copy_rounds > 0) {
+    for (size_t i = 0; i < vec_regions; i++) {
+      size_t region = i;
+
+      auto* vsrc =
+          reinterpret_cast<const __m128i*>(*src + region_size * region);
+      auto* vdst = reinterpret_cast<__m128i*>(*dst + region_size * region);
+
+      // Load the blocks, unaligned
+      data[i] = _mm_loadu_si128(vsrc);
+
+      // Store the blocks, aligned
+      _mm_store_si128(vdst, data[i]);
+
+      // Compute the running CRC
+      crcs[region] = crc32c_t{static_cast<uint32_t>(
+          _mm_crc32_u64(static_cast<uint32_t>(crcs[region]),
+                        static_cast<uint64_t>(_mm_extract_epi64(data[i], 0))))};
+      crcs[region] = crc32c_t{static_cast<uint32_t>(
+          _mm_crc32_u64(static_cast<uint32_t>(crcs[region]),
+                        static_cast<uint64_t>(_mm_extract_epi64(data[i], 1))))};
+    }
+
+    for (size_t i = 0; i < int_regions; i++) {
+      size_t region = vec_regions + i;
+
+      auto* usrc =
+          reinterpret_cast<const uint64_t*>(*src + region_size * region);
+      auto* udst = reinterpret_cast<uint64_t*>(*dst + region_size * region);
+
+      for (size_t j = 0; j < kIntLoadsPerVec; j++) {
+        size_t data_index = i * kIntLoadsPerVec + j;
+
+        int_data[data_index] = *(usrc + j);
+        crcs[region] = crc32c_t{static_cast<uint32_t>(_mm_crc32_u64(
+            static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
+
+        *(udst + j) = int_data[data_index];
+      }
+    }
+
+    // Increment pointers
+    *src += sizeof(__m128i);
+    *dst += sizeof(__m128i);
+    --copy_rounds;
+  }
+}
+
+}  // namespace
+
+template <size_t vec_regions, size_t int_regions>
+class AcceleratedCrcMemcpyEngine : public CrcMemcpyEngine {
+ public:
+  AcceleratedCrcMemcpyEngine() = default;
+  AcceleratedCrcMemcpyEngine(const AcceleratedCrcMemcpyEngine&) = delete;
+  AcceleratedCrcMemcpyEngine operator=(const AcceleratedCrcMemcpyEngine&) =
+      delete;
+
+  crc32c_t Compute(void* __restrict dst, const void* __restrict src,
+                   std::size_t length, crc32c_t initial_crc) const override;
+};
+
+template <size_t vec_regions, size_t int_regions>
+crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
+    void* __restrict dst, const void* __restrict src, std::size_t length,
+    crc32c_t initial_crc) const {
+  constexpr std::size_t kRegions = vec_regions + int_regions;
+  constexpr uint32_t kCrcDataXor = uint32_t{0xffffffff};
+  constexpr std::size_t kBlockSize = sizeof(__m128i);
+  constexpr std::size_t kCopyRoundSize = kRegions * kBlockSize;
+
+  // Number of blocks per cacheline.
+  constexpr std::size_t kBlocksPerCacheLine = ABSL_CACHELINE_SIZE / kBlockSize;
+
+  char* dst_bytes = static_cast<char*>(dst);
+  const char* src_bytes = static_cast<const char*>(src);
+
+  // Make sure that one prefetch per big block is enough to cover the whole
+  // dataset, and we don't prefetch too much.
+  static_assert(ABSL_CACHELINE_SIZE % kBlockSize == 0,
+                "Cache lines are not divided evenly into blocks, may have "
+                "unintended behavior!");
+
+  // Experimentally-determined boundary between a small and large copy.
+  // Below this number, spin-up and concatenation of CRCs takes enough time that
+  // it kills the throughput gains of using 3 regions and wide vectors.
+  constexpr size_t kCrcSmallSize = 256;
+
+  // Experimentally-determined prefetch distance.  Main loop copies will
+  // prefetch data 2 cache lines ahead.
+  constexpr std::size_t kPrefetchAhead = 2 * ABSL_CACHELINE_SIZE;
+
+  // Small-size CRC-memcpy : just do CRC + memcpy
+  if (length < kCrcSmallSize) {
+    crc32c_t crc =
+        ExtendCrc32c(initial_crc, absl::string_view(src_bytes, length));
+    memcpy(dst, src, length);
+    return crc;
+  }
+
+  // Start work on the CRC: undo the XOR from the previous calculation or set up
+  // the initial value of the CRC.
+  // initial_crc ^= kCrcDataXor;
+  initial_crc = crc32c_t{static_cast<uint32_t>(initial_crc) ^ kCrcDataXor};
+
+  // Do an initial alignment copy, so we can use aligned store instructions to
+  // the destination pointer.  We align the destination pointer because the
+  // penalty for an unaligned load is small compared to the penalty of an
+  // unaligned store on modern CPUs.
+  std::size_t bytes_from_last_aligned =
+      reinterpret_cast<uintptr_t>(dst) & (kBlockSize - 1);
+  if (bytes_from_last_aligned != 0) {
+    std::size_t bytes_for_alignment = kBlockSize - bytes_from_last_aligned;
+
+    // Do the short-sized copy and CRC.
+    initial_crc =
+        ShortCrcCopy(dst_bytes, src_bytes, bytes_for_alignment, initial_crc);
+    src_bytes += bytes_for_alignment;
+    dst_bytes += bytes_for_alignment;
+    length -= bytes_for_alignment;
+  }
+
+  // We are going to do the copy and CRC in kRegions regions to make sure that
+  // we can saturate the CRC unit.  The CRCs will be combined at the end of the
+  // run.  Copying will use the SSE registers, and we will extract words from
+  // the SSE registers to add to the CRC.  Initially, we run the loop one full
+  // cache line per region at a time, in order to insert prefetches.
+
+  // Initialize CRCs for kRegions regions.
+  crc32c_t crcs[kRegions];
+  crcs[0] = initial_crc;
+  for (size_t i = 1; i < kRegions; i++) {
+    crcs[i] = crc32c_t{kCrcDataXor};
+  }
+
+  // Find the number of rounds to copy and the region size.  Also compute the
+  // tail size here.
+  size_t copy_rounds = length / kCopyRoundSize;
+
+  // Find the size of each region and the size of the tail.
+  const std::size_t region_size = copy_rounds * kBlockSize;
+  const std::size_t tail_size = length - (kRegions * region_size);
+
+  // Holding registers for data in each region.
+  std::array<__m128i, vec_regions> vec_data;
+  std::array<uint64_t, int_regions * kIntLoadsPerVec> int_data;
+
+  // Main loop.
+  while (copy_rounds > kBlocksPerCacheLine) {
+    // Prefetch kPrefetchAhead bytes ahead of each pointer.
+    for (size_t i = 0; i < kRegions; i++) {
+      absl::PrefetchToLocalCache(src_bytes + kPrefetchAhead + region_size * i);
+      absl::PrefetchToLocalCache(dst_bytes + kPrefetchAhead + region_size * i);
+    }
+
+    // Load and store data, computing CRC on the way.
+    for (size_t i = 0; i < kBlocksPerCacheLine; i++) {
+      // Copy and CRC the data for the CRC regions.
+      for (size_t j = 0; j < vec_regions; j++) {
+        // Cycle which regions get vector load/store and integer load/store, to
+        // engage prefetching logic around vector load/stores and save issue
+        // slots by using the integer registers.
+        size_t region = (j + i) % kRegions;
+
+        auto* vsrc =
+            reinterpret_cast<const __m128i*>(src_bytes + region_size * region);
+        auto* vdst =
+            reinterpret_cast<__m128i*>(dst_bytes + region_size * region);
+
+        // Load and CRC data.
+        vec_data[j] = _mm_loadu_si128(vsrc + i);
+        crcs[region] = crc32c_t{static_cast<uint32_t>(_mm_crc32_u64(
+            static_cast<uint32_t>(crcs[region]),
+            static_cast<uint64_t>(_mm_extract_epi64(vec_data[j], 0))))};
+        crcs[region] = crc32c_t{static_cast<uint32_t>(_mm_crc32_u64(
+            static_cast<uint32_t>(crcs[region]),
+            static_cast<uint64_t>(_mm_extract_epi64(vec_data[j], 1))))};
+
+        // Store the data.
+        _mm_store_si128(vdst + i, vec_data[j]);
+      }
+
+      // Copy and CRC the data for the integer (non-vector) regions.
+      for (size_t j = 0; j < int_regions; j++) {
+        // Cycle which regions get vector load/store and integer load/store, to
+        // engage prefetching logic around vector load/stores and save issue
+        // slots by using the integer registers.
+        size_t region = (j + vec_regions + i) % kRegions;
+
+        auto* usrc =
+            reinterpret_cast<const uint64_t*>(src_bytes + region_size * region);
+        auto* udst =
+            reinterpret_cast<uint64_t*>(dst_bytes + region_size * region);
+
+        for (size_t k = 0; k < kIntLoadsPerVec; k++) {
+          size_t data_index = j * kIntLoadsPerVec + k;
+
+          // Load and CRC the data.
+          int_data[data_index] = *(usrc + i * kIntLoadsPerVec + k);
+          crcs[region] = crc32c_t{static_cast<uint32_t>(_mm_crc32_u64(
+              static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
+
+          // Store the data.
+          *(udst + i * kIntLoadsPerVec + k) = int_data[data_index];
+        }
+      }
+    }
+
+    // Increment pointers
+    src_bytes += kBlockSize * kBlocksPerCacheLine;
+    dst_bytes += kBlockSize * kBlocksPerCacheLine;
+    copy_rounds -= kBlocksPerCacheLine;
+  }
+
+  // Copy and CRC the tails of each region.
+  LargeTailCopy<vec_regions, int_regions>(crcs, &dst_bytes, &src_bytes,
+                                          region_size, copy_rounds);
+
+  // Move the source and destination pointers to the end of the region
+  src_bytes += region_size * (kRegions - 1);
+  dst_bytes += region_size * (kRegions - 1);
+
+  // Finalize the first CRCs: XOR the internal CRCs by the XOR mask to undo the
+  // XOR done before doing block copy + CRCs.
+  for (size_t i = 0; i + 1 < kRegions; i++) {
+    crcs[i] = crc32c_t{static_cast<uint32_t>(crcs[i]) ^ kCrcDataXor};
+  }
+
+  // Build a CRC of the first kRegions - 1 regions.
+  crc32c_t full_crc = crcs[0];
+  for (size_t i = 1; i + 1 < kRegions; i++) {
+    full_crc = ConcatCrc32c(full_crc, crcs[i], region_size);
+  }
+
+  // Copy and CRC the tail through the XMM registers.
+  std::size_t tail_blocks = tail_size / kBlockSize;
+  LargeTailCopy<0, 1>(&crcs[kRegions - 1], &dst_bytes, &src_bytes, 0,
+                      tail_blocks);
+
+  // Final tail copy for under 16 bytes.
+  crcs[kRegions - 1] =
+      ShortCrcCopy(dst_bytes, src_bytes, tail_size - tail_blocks * kBlockSize,
+                   crcs[kRegions - 1]);
+
+  // Finalize and concatenate the final CRC, then return.
+  crcs[kRegions - 1] =
+      crc32c_t{static_cast<uint32_t>(crcs[kRegions - 1]) ^ kCrcDataXor};
+  return ConcatCrc32c(full_crc, crcs[kRegions - 1], region_size + tail_size);
+}
+
+CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
+#ifdef UNDEFINED_BEHAVIOR_SANITIZER
+  // UBSAN does not play nicely with unaligned loads (which we use a lot).
+  // Get the underlying architecture.
+  CpuType cpu_type = GetCpuType();
+  switch (cpu_type) {
+    case CpuType::kUnknown:
+    case CpuType::kAmdRome:
+    case CpuType::kAmdNaples:
+    case CpuType::kIntelCascadelakeXeon:
+    case CpuType::kIntelSkylakeXeon:
+    case CpuType::kIntelSkylake:
+    case CpuType::kIntelBroadwell:
+    case CpuType::kIntelHaswell:
+    case CpuType::kIntelIvybridge:
+      return {
+          /*.temporal=*/new FallbackCrcMemcpyEngine(),
+          /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
+      };
+    // INTEL_SANDYBRIDGE performs better with SSE than AVX.
+    case CpuType::kIntelSandybridge:
+      return {
+          /*.temporal=*/new FallbackCrcMemcpyEngine(),
+          /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(),
+      };
+    default:
+      return {/*.temporal=*/new FallbackCrcMemcpyEngine(),
+              /*.non_temporal=*/new FallbackCrcMemcpyEngine()};
+  }
+#else
+  // Get the underlying architecture.
+  CpuType cpu_type = GetCpuType();
+  switch (cpu_type) {
+    // On Zen 2, PEXTRQ uses 2 micro-ops, including one on the vector store
+    // port, which causes data movement from the vector registers to the
+    // integer registers (where CRC32C happens) to crowd the same units as
+    // vector stores.  As a result, using that path exclusively causes
+    // bottlenecking on this port.
+    // We can avoid this bottleneck by using the integer side of the CPU for
+    // most operations rather than the vector side.  We keep a vector region to
+    // engage some of the prefetching logic in the cache hierarchy which seems
+    // to give vector instructions special treatment.  These prefetch units see
+    // strided access to each region, and do the right thing.
+    case CpuType::kAmdRome:
+    case CpuType::kAmdNaples:
+      return {
+          /*.temporal=*/new AcceleratedCrcMemcpyEngine<1, 2>(),
+          /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
+      };
+    // PCLMULQDQ is slow and we don't have wide enough issue width to take
+    // advantage of it.  For an unknown architecture, don't risk using CLMULs.
+    case CpuType::kIntelCascadelakeXeon:
+    case CpuType::kIntelSkylakeXeon:
+    case CpuType::kIntelSkylake:
+    case CpuType::kIntelBroadwell:
+    case CpuType::kIntelHaswell:
+    case CpuType::kIntelIvybridge:
+      return {
+          /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(),
+          /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
+      };
+    // INTEL_SANDYBRIDGE performs better with SSE than AVX.
+    case CpuType::kIntelSandybridge:
+      return {
+          /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(),
+          /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(),
+      };
+    default:
+      return {/*.temporal=*/new FallbackCrcMemcpyEngine(),
+              /*.non_temporal=*/new FallbackCrcMemcpyEngine()};
+  }
+#endif  // UNDEFINED_BEHAVIOR_SANITIZER
+}
+
+// For testing, allow the user to specify which engine they want.
+std::unique_ptr<CrcMemcpyEngine> CrcMemcpy::GetTestEngine(int vector,
+                                                          int integer) {
+  if (vector == 3 && integer == 0) {
+    return std::make_unique<AcceleratedCrcMemcpyEngine<3, 0>>();
+  } else if (vector == 1 && integer == 2) {
+    return std::make_unique<AcceleratedCrcMemcpyEngine<1, 2>>();
+  }
+  return nullptr;
+}
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE
diff --git a/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc b/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc
new file mode 100644
index 0000000..adc867f
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc
@@ -0,0 +1,93 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/crc/crc32c.h"
+#include "absl/crc/internal/crc_memcpy.h"
+#include "absl/crc/internal/non_temporal_memcpy.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+crc32c_t CrcNonTemporalMemcpyEngine::Compute(void* __restrict dst,
+                                             const void* __restrict src,
+                                             std::size_t length,
+                                             crc32c_t initial_crc) const {
+  constexpr size_t kBlockSize = 8192;
+  crc32c_t crc = initial_crc;
+
+  const char* src_bytes = reinterpret_cast<const char*>(src);
+  char* dst_bytes = reinterpret_cast<char*>(dst);
+
+  // Copy + CRC loop - run 8k chunks until we are out of full chunks.
+  std::size_t offset = 0;
+  for (; offset + kBlockSize < length; offset += kBlockSize) {
+    crc = absl::ExtendCrc32c(crc,
+                             absl::string_view(src_bytes + offset, kBlockSize));
+    non_temporal_store_memcpy(dst_bytes + offset, src_bytes + offset,
+                              kBlockSize);
+  }
+
+  // Save some work if length is 0.
+  if (offset < length) {
+    std::size_t final_copy_size = length - offset;
+    crc = ExtendCrc32c(crc,
+                       absl::string_view(src_bytes + offset, final_copy_size));
+
+    non_temporal_store_memcpy(dst_bytes + offset, src_bytes + offset,
+                              final_copy_size);
+  }
+
+  return crc;
+}
+
+crc32c_t CrcNonTemporalMemcpyAVXEngine::Compute(void* __restrict dst,
+                                                const void* __restrict src,
+                                                std::size_t length,
+                                                crc32c_t initial_crc) const {
+  constexpr size_t kBlockSize = 8192;
+  crc32c_t crc = initial_crc;
+
+  const char* src_bytes = reinterpret_cast<const char*>(src);
+  char* dst_bytes = reinterpret_cast<char*>(dst);
+
+  // Copy + CRC loop - run 8k chunks until we are out of full chunks.
+  std::size_t offset = 0;
+  for (; offset + kBlockSize < length; offset += kBlockSize) {
+    crc = ExtendCrc32c(crc, absl::string_view(src_bytes + offset, kBlockSize));
+
+    non_temporal_store_memcpy_avx(dst_bytes + offset, src_bytes + offset,
+                                  kBlockSize);
+  }
+
+  // Save some work if length is 0.
+  if (offset < length) {
+    std::size_t final_copy_size = length - offset;
+    crc = ExtendCrc32c(crc,
+                       absl::string_view(src_bytes + offset, final_copy_size));
+
+    non_temporal_store_memcpy_avx(dst_bytes + offset, src_bytes + offset,
+                                  final_copy_size);
+  }
+
+  return crc;
+}
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc b/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
new file mode 100644
index 0000000..ef521d2
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
@@ -0,0 +1,725 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Hardware accelerated CRC32 computation on Intel and ARM architecture.
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/prefetch.h"
+#include "absl/crc/internal/cpu_detect.h"
+#include "absl/crc/internal/crc.h"
+#include "absl/crc/internal/crc32_x86_arm_combined_simd.h"
+#include "absl/crc/internal/crc_internal.h"
+#include "absl/memory/memory.h"
+#include "absl/numeric/bits.h"
+
+#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \
+    defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD)
+#define ABSL_INTERNAL_CAN_USE_SIMD_CRC32C
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+#if defined(ABSL_INTERNAL_CAN_USE_SIMD_CRC32C)
+
+// Implementation details not exported outside of file
+namespace {
+
+// Some machines have CRC acceleration hardware.
+// We can do a faster version of Extend() on such machines.
+class CRC32AcceleratedX86ARMCombined : public CRC32 {
+ public:
+  CRC32AcceleratedX86ARMCombined() {}
+  ~CRC32AcceleratedX86ARMCombined() override {}
+  void ExtendByZeroes(uint32_t* crc, size_t length) const override;
+  uint32_t ComputeZeroConstant(size_t length) const;
+
+ private:
+  CRC32AcceleratedX86ARMCombined(const CRC32AcceleratedX86ARMCombined&) =
+      delete;
+  CRC32AcceleratedX86ARMCombined& operator=(
+      const CRC32AcceleratedX86ARMCombined&) = delete;
+};
+
+// Constants for switching between algorithms.
+// Chosen by comparing speed at different powers of 2.
+constexpr size_t kSmallCutoff = 256;
+constexpr size_t kMediumCutoff = 2048;
+
+#define ABSL_INTERNAL_STEP1(crc)                      \
+  do {                                                \
+    crc = CRC32_u8(static_cast<uint32_t>(crc), *p++); \
+  } while (0)
+#define ABSL_INTERNAL_STEP2(crc)                                               \
+  do {                                                                         \
+    crc =                                                                      \
+        CRC32_u16(static_cast<uint32_t>(crc), absl::little_endian::Load16(p)); \
+    p += 2;                                                                    \
+  } while (0)
+#define ABSL_INTERNAL_STEP4(crc)                                               \
+  do {                                                                         \
+    crc =                                                                      \
+        CRC32_u32(static_cast<uint32_t>(crc), absl::little_endian::Load32(p)); \
+    p += 4;                                                                    \
+  } while (0)
+#define ABSL_INTERNAL_STEP8(crc, data)                  \
+  do {                                                  \
+    crc = CRC32_u64(static_cast<uint32_t>(crc),         \
+                    absl::little_endian::Load64(data)); \
+    data += 8;                                          \
+  } while (0)
+#define ABSL_INTERNAL_STEP8BY2(crc0, crc1, p0, p1) \
+  do {                                             \
+    ABSL_INTERNAL_STEP8(crc0, p0);                 \
+    ABSL_INTERNAL_STEP8(crc1, p1);                 \
+  } while (0)
+#define ABSL_INTERNAL_STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \
+  do {                                                       \
+    ABSL_INTERNAL_STEP8(crc0, p0);                           \
+    ABSL_INTERNAL_STEP8(crc1, p1);                           \
+    ABSL_INTERNAL_STEP8(crc2, p2);                           \
+  } while (0)
+
+namespace {
+
+uint32_t multiply(uint32_t a, uint32_t b) {
+  V128 shifts = V128_From2x64(0, 1);
+  V128 power = V128_From2x64(0, a);
+  V128 crc = V128_From2x64(0, b);
+  V128 res = V128_PMulLow(power, crc);
+
+  // Combine crc values
+  res = V128_ShiftLeft64(res, shifts);
+  return static_cast<uint32_t>(V128_Extract32<1>(res)) ^
+         CRC32_u32(0, static_cast<uint32_t>(V128_Low64(res)));
+}
+
+// Powers of crc32c polynomial, for faster ExtendByZeros.
+// Verified against folly:
+// folly/hash/detail/Crc32CombineDetail.cpp
+constexpr uint32_t kCRC32CPowers[] = {
+    0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955, 0xb8fdb1e7,
+    0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62, 0x28461564,
+    0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f, 0x538586e3,
+    0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe, 0xe94ca9bc,
+    0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000, 0x00800000,
+    0x00008000, 0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955,
+    0xb8fdb1e7, 0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62,
+    0x28461564, 0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f,
+    0x538586e3, 0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe,
+    0xe94ca9bc, 0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000,
+    0x00800000, 0x00008000,
+};
+
+}  // namespace
+
+// Compute a magic constant, so that multiplying by it is the same as
+// extending crc by length zeros.
+uint32_t CRC32AcceleratedX86ARMCombined::ComputeZeroConstant(
+    size_t length) const {
+  // Lowest 2 bits are handled separately in ExtendByZeroes
+  length >>= 2;
+
+  int index = absl::countr_zero(length);
+  uint32_t prev = kCRC32CPowers[index];
+  length &= length - 1;
+
+  while (length) {
+    // For each bit of length, extend by 2**n zeros.
+    index = absl::countr_zero(length);
+    prev = multiply(prev, kCRC32CPowers[index]);
+    length &= length - 1;
+  }
+  return prev;
+}
+
+void CRC32AcceleratedX86ARMCombined::ExtendByZeroes(uint32_t* crc,
+                                                    size_t length) const {
+  uint32_t val = *crc;
+  // Don't bother with multiplication for small length.
+  switch (length & 3) {
+    case 0:
+      break;
+    case 1:
+      val = CRC32_u8(val, 0);
+      break;
+    case 2:
+      val = CRC32_u16(val, 0);
+      break;
+    case 3:
+      val = CRC32_u8(val, 0);
+      val = CRC32_u16(val, 0);
+      break;
+  }
+  if (length > 3) {
+    val = multiply(val, ComputeZeroConstant(length));
+  }
+  *crc = val;
+}
+
+// Taken from Intel paper "Fast CRC Computation for iSCSI Polynomial Using CRC32
+// Instruction"
+// https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/crc-iscsi-polynomial-crc32-instruction-paper.pdf
+// We only need every 4th value, because we unroll the loop by 4.
+constexpr uint64_t kClmulConstants[] = {
+    0x09e4addf8, 0x0ba4fc28e, 0x00d3b6092, 0x09e4addf8, 0x0ab7aff2a,
+    0x102f9b8a2, 0x0b9e02b86, 0x00d3b6092, 0x1bf2e8b8a, 0x18266e456,
+    0x0d270f1a2, 0x0ab7aff2a, 0x11eef4f8e, 0x083348832, 0x0dd7e3b0c,
+    0x0b9e02b86, 0x0271d9844, 0x1b331e26a, 0x06b749fb2, 0x1bf2e8b8a,
+    0x0e6fc4e6a, 0x0ce7f39f4, 0x0d7a4825c, 0x0d270f1a2, 0x026f6a60a,
+    0x12ed0daac, 0x068bce87a, 0x11eef4f8e, 0x1329d9f7e, 0x0b3e32c28,
+    0x0170076fa, 0x0dd7e3b0c, 0x1fae1cc66, 0x010746f3c, 0x086d8e4d2,
+    0x0271d9844, 0x0b3af077a, 0x093a5f730, 0x1d88abd4a, 0x06b749fb2,
+    0x0c9c8b782, 0x0cec3662e, 0x1ddffc5d4, 0x0e6fc4e6a, 0x168763fa6,
+    0x0b0cd4768, 0x19b1afbc4, 0x0d7a4825c, 0x123888b7a, 0x00167d312,
+    0x133d7a042, 0x026f6a60a, 0x000bcf5f6, 0x19d34af3a, 0x1af900c24,
+    0x068bce87a, 0x06d390dec, 0x16cba8aca, 0x1f16a3418, 0x1329d9f7e,
+    0x19fb2a8b0, 0x02178513a, 0x1a0f717c4, 0x0170076fa,
+};
+
+enum class CutoffStrategy {
+  // Use 3 CRC streams to fold into 1.
+  Fold3,
+  // Unroll CRC instructions for 64 bytes.
+  Unroll64CRC,
+};
+
+// Base class for CRC32AcceleratedX86ARMCombinedMultipleStreams containing the
+// methods and data that don't need the template arguments.
+class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase
+    : public CRC32AcceleratedX86ARMCombined {
+ protected:
+  // Update partialCRC with crc of 64 byte block. Calling FinalizePclmulStream
+  // would produce a single crc checksum, but it is expensive. PCLMULQDQ has a
+  // high latency, so we run 4 128-bit partial checksums that can be reduced to
+  // a single value by FinalizePclmulStream later. Computing crc for arbitrary
+  // polynomials with PCLMULQDQ is described in the Intel paper "Fast CRC
+  // Computation for Generic Polynomials Using PCLMULQDQ Instruction"
+  // https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+  // We are applying it to CRC32C polynomial.
+  ABSL_ATTRIBUTE_ALWAYS_INLINE void Process64BytesPclmul(
+      const uint8_t* p, V128* partialCRC) const {
+    V128 loopMultiplicands = V128_Load(reinterpret_cast<const V128*>(k1k2));
+
+    V128 partialCRC1 = partialCRC[0];
+    V128 partialCRC2 = partialCRC[1];
+    V128 partialCRC3 = partialCRC[2];
+    V128 partialCRC4 = partialCRC[3];
+
+    V128 tmp1 = V128_PMulHi(partialCRC1, loopMultiplicands);
+    V128 tmp2 = V128_PMulHi(partialCRC2, loopMultiplicands);
+    V128 tmp3 = V128_PMulHi(partialCRC3, loopMultiplicands);
+    V128 tmp4 = V128_PMulHi(partialCRC4, loopMultiplicands);
+    V128 data1 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 0));
+    V128 data2 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 1));
+    V128 data3 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 2));
+    V128 data4 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 3));
+    partialCRC1 = V128_PMulLow(partialCRC1, loopMultiplicands);
+    partialCRC2 = V128_PMulLow(partialCRC2, loopMultiplicands);
+    partialCRC3 = V128_PMulLow(partialCRC3, loopMultiplicands);
+    partialCRC4 = V128_PMulLow(partialCRC4, loopMultiplicands);
+    partialCRC1 = V128_Xor(tmp1, partialCRC1);
+    partialCRC2 = V128_Xor(tmp2, partialCRC2);
+    partialCRC3 = V128_Xor(tmp3, partialCRC3);
+    partialCRC4 = V128_Xor(tmp4, partialCRC4);
+    partialCRC1 = V128_Xor(partialCRC1, data1);
+    partialCRC2 = V128_Xor(partialCRC2, data2);
+    partialCRC3 = V128_Xor(partialCRC3, data3);
+    partialCRC4 = V128_Xor(partialCRC4, data4);
+    partialCRC[0] = partialCRC1;
+    partialCRC[1] = partialCRC2;
+    partialCRC[2] = partialCRC3;
+    partialCRC[3] = partialCRC4;
+  }
+
+  // Reduce the partialCRC produced by Process64BytesPclmul into a single
+  // value that represents the crc checksum of all the processed bytes.
+  ABSL_ATTRIBUTE_ALWAYS_INLINE uint64_t
+  FinalizePclmulStream(V128* partialCRC) const {
+    V128 partialCRC1 = partialCRC[0];
+    V128 partialCRC2 = partialCRC[1];
+    V128 partialCRC3 = partialCRC[2];
+    V128 partialCRC4 = partialCRC[3];
+
+    // Combine 4 vectors of partial crc into a single vector.
+    V128 reductionMultiplicands =
+        V128_Load(reinterpret_cast<const V128*>(k5k6));
+
+    V128 low = V128_PMulLow(reductionMultiplicands, partialCRC1);
+    V128 high = V128_PMulHi(reductionMultiplicands, partialCRC1);
+
+    partialCRC1 = V128_Xor(low, high);
+    partialCRC1 = V128_Xor(partialCRC1, partialCRC2);
+
+    low = V128_PMulLow(reductionMultiplicands, partialCRC3);
+    high = V128_PMulHi(reductionMultiplicands, partialCRC3);
+
+    partialCRC3 = V128_Xor(low, high);
+    partialCRC3 = V128_Xor(partialCRC3, partialCRC4);
+
+    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k3k4));
+
+    low = V128_PMulLow(reductionMultiplicands, partialCRC1);
+    high = V128_PMulHi(reductionMultiplicands, partialCRC1);
+    V128 fullCRC = V128_Xor(low, high);
+    fullCRC = V128_Xor(fullCRC, partialCRC3);
+
+    // Reduce fullCRC into scalar value.
+    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k5k6));
+
+    V128 mask = V128_Load(reinterpret_cast<const V128*>(kMask));
+
+    V128 tmp = V128_PMul01(reductionMultiplicands, fullCRC);
+    fullCRC = V128_ShiftRight<8>(fullCRC);
+    fullCRC = V128_Xor(fullCRC, tmp);
+
+    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k7k0));
+
+    tmp = V128_ShiftRight<4>(fullCRC);
+    fullCRC = V128_And(fullCRC, mask);
+    fullCRC = V128_PMulLow(reductionMultiplicands, fullCRC);
+    fullCRC = V128_Xor(tmp, fullCRC);
+
+    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(kPoly));
+
+    tmp = V128_And(fullCRC, mask);
+    tmp = V128_PMul01(reductionMultiplicands, tmp);
+    tmp = V128_And(tmp, mask);
+    tmp = V128_PMulLow(reductionMultiplicands, tmp);
+
+    fullCRC = V128_Xor(tmp, fullCRC);
+
+    return static_cast<uint64_t>(V128_Extract32<1>(fullCRC));
+  }
+
+  // Update crc with 64 bytes of data from p.
+  ABSL_ATTRIBUTE_ALWAYS_INLINE uint64_t Process64BytesCRC(const uint8_t* p,
+                                                          uint64_t crc) const {
+    for (int i = 0; i < 8; i++) {
+      crc =
+          CRC32_u64(static_cast<uint32_t>(crc), absl::little_endian::Load64(p));
+      p += 8;
+    }
+    return crc;
+  }
+
+  // Generated by crc32c_x86_test --crc32c_generate_constants=true
+  // and verified against constants in linux kernel for S390:
+  // https://github.com/torvalds/linux/blob/master/arch/s390/crypto/crc32le-vx.S
+  alignas(16) static constexpr uint64_t k1k2[2] = {0x0740eef02, 0x09e4addf8};
+  alignas(16) static constexpr uint64_t k3k4[2] = {0x1384aa63a, 0x0ba4fc28e};
+  alignas(16) static constexpr uint64_t k5k6[2] = {0x0f20c0dfe, 0x14cd00bd6};
+  alignas(16) static constexpr uint64_t k7k0[2] = {0x0dd45aab8, 0x000000000};
+  alignas(16) static constexpr uint64_t kPoly[2] = {0x105ec76f0, 0x0dea713f1};
+  alignas(16) static constexpr uint32_t kMask[4] = {~0u, 0u, ~0u, 0u};
+
+  // Medium runs of bytes are broken into groups of kGroupsSmall blocks of the
+  // same size. Each group is CRCed in parallel, then the groups are combined
+  // at the end of the block.
+  static constexpr size_t kGroupsSmall = 3;
+  // For large runs we use up to kMaxStreams blocks computed with the CRC
+  // instruction, and up to kMaxStreams blocks computed with PCLMULQDQ, which
+  // are combined at the end.
+  static constexpr size_t kMaxStreams = 3;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+alignas(16) constexpr uint64_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k1k2[2];
+alignas(16) constexpr uint64_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k3k4[2];
+alignas(16) constexpr uint64_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k5k6[2];
+alignas(16) constexpr uint64_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k7k0[2];
+alignas(16) constexpr uint64_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kPoly[2];
+alignas(16) constexpr uint32_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kMask[4];
+constexpr size_t
+    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kGroupsSmall;
+constexpr size_t CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kMaxStreams;
+#endif  // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
+template <size_t num_crc_streams, size_t num_pclmul_streams,
+          CutoffStrategy strategy>
+class CRC32AcceleratedX86ARMCombinedMultipleStreams
+    : public CRC32AcceleratedX86ARMCombinedMultipleStreamsBase {
+  ABSL_ATTRIBUTE_HOT
+  void Extend(uint32_t* crc, const void* bytes, size_t length) const override {
+    static_assert(num_crc_streams >= 1 && num_crc_streams <= kMaxStreams,
+                  "Invalid number of crc streams");
+    static_assert(num_pclmul_streams >= 0 && num_pclmul_streams <= kMaxStreams,
+                  "Invalid number of pclmul streams");
+    const uint8_t* p = static_cast<const uint8_t*>(bytes);
+    const uint8_t* e = p + length;
+    uint32_t l = *crc;
+    uint64_t l64;
+
+    // We have dedicated instructions for 1, 2, 4 and 8 bytes.
+    if (length & 8) {
+      ABSL_INTERNAL_STEP8(l, p);
+      length &= ~size_t{8};
+    }
+    if (length & 4) {
+      ABSL_INTERNAL_STEP4(l);
+      length &= ~size_t{4};
+    }
+    if (length & 2) {
+      ABSL_INTERNAL_STEP2(l);
+      length &= ~size_t{2};
+    }
+    if (length & 1) {
+      ABSL_INTERNAL_STEP1(l);
+      length &= ~size_t{1};
+    }
+    if (length == 0) {
+      *crc = l;
+      return;
+    }
+    // length is now multiple of 16.
+
+    // For small blocks just run a simple loop, because the cost of combining
+    // multiple streams is significant.
+    if (strategy != CutoffStrategy::Unroll64CRC) {
+      if (length < kSmallCutoff) {
+        while (length >= 16) {
+          ABSL_INTERNAL_STEP8(l, p);
+          ABSL_INTERNAL_STEP8(l, p);
+          length -= 16;
+        }
+        *crc = l;
+        return;
+      }
+    }
+
+    // For medium blocks we run 3 crc streams and combine them as described in
+    // the Intel paper above. Running a 4th stream doesn't help, because the
+    // crc instruction has latency 3 and throughput 1.
+    if (length < kMediumCutoff) {
+      l64 = l;
+      if (strategy == CutoffStrategy::Fold3) {
+        uint64_t l641 = 0;
+        uint64_t l642 = 0;
+        const size_t blockSize = 32;
+        size_t bs = static_cast<size_t>(e - p) / kGroupsSmall / blockSize;
+        const uint8_t* p1 = p + bs * blockSize;
+        const uint8_t* p2 = p1 + bs * blockSize;
+
+        for (size_t i = 0; i + 1 < bs; ++i) {
+          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+          PrefetchToLocalCache(
+              reinterpret_cast<const char*>(p + kPrefetchHorizonMedium));
+          PrefetchToLocalCache(
+              reinterpret_cast<const char*>(p1 + kPrefetchHorizonMedium));
+          PrefetchToLocalCache(
+              reinterpret_cast<const char*>(p2 + kPrefetchHorizonMedium));
+        }
+        // Don't run crc on last 8 bytes.
+        ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+        ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+        ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
+        ABSL_INTERNAL_STEP8BY2(l64, l641, p, p1);
+
+        V128 magic = *(reinterpret_cast<const V128*>(kClmulConstants) + bs - 1);
+
+        V128 tmp = V128_From2x64(0, l64);
+
+        V128 res1 = V128_PMulLow(tmp, magic);
+
+        tmp = V128_From2x64(0, l641);
+
+        V128 res2 = V128_PMul10(tmp, magic);
+        V128 x = V128_Xor(res1, res2);
+        l64 = static_cast<uint64_t>(V128_Low64(x)) ^
+              absl::little_endian::Load64(p2);
+        l64 = CRC32_u64(static_cast<uint32_t>(l642), l64);
+
+        p = p2 + 8;
+      } else if (strategy == CutoffStrategy::Unroll64CRC) {
+        while ((e - p) >= 64) {
+          l64 = Process64BytesCRC(p, l64);
+          p += 64;
+        }
+      }
+    } else {
+      // There is a lot of data, so we can ignore the combine costs and run all
+      // requested streams (num_crc_streams + num_pclmul_streams),
+      // using prefetch. CRC and PCLMULQDQ use different CPU execution units,
+      // so on some CPUs it makes sense to execute both of them on different
+      // streams.
+
+      // Point x at first 8-byte aligned byte in string.
+      const uint8_t* x = RoundUp<8>(p);
+      // Process bytes until p is 8-byte aligned, if that isn't past the end.
+      while (p != x) {
+        ABSL_INTERNAL_STEP1(l);
+      }
+
+      size_t bs = static_cast<size_t>(e - p) /
+                  (num_crc_streams + num_pclmul_streams) / 64;
+      const uint8_t* crc_streams[kMaxStreams];
+      const uint8_t* pclmul_streams[kMaxStreams];
+      // We are guaranteed to have at least one crc stream.
+      crc_streams[0] = p;
+      for (size_t i = 1; i < num_crc_streams; i++) {
+        crc_streams[i] = crc_streams[i - 1] + bs * 64;
+      }
+      pclmul_streams[0] = crc_streams[num_crc_streams - 1] + bs * 64;
+      for (size_t i = 1; i < num_pclmul_streams; i++) {
+        pclmul_streams[i] = pclmul_streams[i - 1] + bs * 64;
+      }
+
+      // Per stream crc sums.
+      uint64_t l64_crc[kMaxStreams] = {l};
+      uint64_t l64_pclmul[kMaxStreams] = {0};
+
+      // Peel the first iteration, because the PCLMULQDQ streams need setup.
+      for (size_t i = 0; i < num_crc_streams; i++) {
+        l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]);
+        crc_streams[i] += 16 * 4;
+      }
+
+      V128 partialCRC[kMaxStreams][4];
+      for (size_t i = 0; i < num_pclmul_streams; i++) {
+        partialCRC[i][0] = V128_LoadU(
+            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 0));
+        partialCRC[i][1] = V128_LoadU(
+            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 1));
+        partialCRC[i][2] = V128_LoadU(
+            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 2));
+        partialCRC[i][3] = V128_LoadU(
+            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 3));
+        pclmul_streams[i] += 16 * 4;
+      }
+
+      for (size_t i = 1; i < bs; i++) {
+        // Prefetch data for next iterations.
+        for (size_t j = 0; j < num_crc_streams; j++) {
+          PrefetchToLocalCache(
+              reinterpret_cast<const char*>(crc_streams[j] + kPrefetchHorizon));
+        }
+        for (size_t j = 0; j < num_pclmul_streams; j++) {
+          PrefetchToLocalCache(reinterpret_cast<const char*>(pclmul_streams[j] +
+                                                             kPrefetchHorizon));
+        }
+
+        // We process each stream in 64 byte blocks. This can be written as
+        // for (int i = 0; i < num_pclmul_streams; i++) {
+        //   Process64BytesPclmul(pclmul_streams[i], partialCRC[i]);
+        //   pclmul_streams[i] += 16 * 4;
+        // }
+        // for (int i = 0; i < num_crc_streams; i++) {
+        //   l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]);
+        //   crc_streams[i] += 16*4;
+        // }
+        // But unrolling and interleaving PCLMULQDQ and CRC blocks manually
+        // gives ~2% performance boost.
+        l64_crc[0] = Process64BytesCRC(crc_streams[0], l64_crc[0]);
+        crc_streams[0] += 16 * 4;
+        if (num_pclmul_streams > 0) {
+          Process64BytesPclmul(pclmul_streams[0], partialCRC[0]);
+          pclmul_streams[0] += 16 * 4;
+        }
+        if (num_crc_streams > 1) {
+          l64_crc[1] = Process64BytesCRC(crc_streams[1], l64_crc[1]);
+          crc_streams[1] += 16 * 4;
+        }
+        if (num_pclmul_streams > 1) {
+          Process64BytesPclmul(pclmul_streams[1], partialCRC[1]);
+          pclmul_streams[1] += 16 * 4;
+        }
+        if (num_crc_streams > 2) {
+          l64_crc[2] = Process64BytesCRC(crc_streams[2], l64_crc[2]);
+          crc_streams[2] += 16 * 4;
+        }
+        if (num_pclmul_streams > 2) {
+          Process64BytesPclmul(pclmul_streams[2], partialCRC[2]);
+          pclmul_streams[2] += 16 * 4;
+        }
+      }
+
+      // PCLMULQDQ-based streams require a special final step;
+      // CRC-based streams don't.
+      for (size_t i = 0; i < num_pclmul_streams; i++) {
+        l64_pclmul[i] = FinalizePclmulStream(partialCRC[i]);
+      }
+
+      // Combine all streams into single result.
+      uint32_t magic = ComputeZeroConstant(bs * 64);
+      l64 = l64_crc[0];
+      for (size_t i = 1; i < num_crc_streams; i++) {
+        l64 = multiply(static_cast<uint32_t>(l64), magic);
+        l64 ^= l64_crc[i];
+      }
+      for (size_t i = 0; i < num_pclmul_streams; i++) {
+        l64 = multiply(static_cast<uint32_t>(l64), magic);
+        l64 ^= l64_pclmul[i];
+      }
+
+      // Update p.
+      if (num_pclmul_streams > 0) {
+        p = pclmul_streams[num_pclmul_streams - 1];
+      } else {
+        p = crc_streams[num_crc_streams - 1];
+      }
+    }
+    l = static_cast<uint32_t>(l64);
+
+    while ((e - p) >= 16) {
+      ABSL_INTERNAL_STEP8(l, p);
+      ABSL_INTERNAL_STEP8(l, p);
+    }
+    // Process the last few bytes
+    while (p != e) {
+      ABSL_INTERNAL_STEP1(l);
+    }
+
+#undef ABSL_INTERNAL_STEP8BY3
+#undef ABSL_INTERNAL_STEP8BY2
+#undef ABSL_INTERNAL_STEP8
+#undef ABSL_INTERNAL_STEP4
+#undef ABSL_INTERNAL_STEP2
+#undef ABSL_INTERNAL_STEP1
+
+    *crc = l;
+  }
+};
+
+}  // namespace
+
+// Intel processors with SSE4.2 have an instruction for one particular
+// 32-bit CRC polynomial:  crc32c
+CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() {
+  CpuType type = GetCpuType();
+  switch (type) {
+    case CpuType::kIntelHaswell:
+    case CpuType::kAmdRome:
+    case CpuType::kAmdNaples:
+    case CpuType::kAmdMilan:
+      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
+          3, 1, CutoffStrategy::Fold3>();
+    // PCLMULQDQ is fast, use combined PCLMULQDQ + CRC implementation.
+    case CpuType::kIntelCascadelakeXeon:
+    case CpuType::kIntelSkylakeXeon:
+    case CpuType::kIntelBroadwell:
+    case CpuType::kIntelSkylake:
+      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
+          3, 2, CutoffStrategy::Fold3>();
+    // PCLMULQDQ is slow, don't use it.
+    case CpuType::kIntelIvybridge:
+    case CpuType::kIntelSandybridge:
+    case CpuType::kIntelWestmere:
+      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
+          3, 0, CutoffStrategy::Fold3>();
+    case CpuType::kArmNeoverseN1:
+      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
+          1, 1, CutoffStrategy::Unroll64CRC>();
+#if defined(__aarch64__)
+    default:
+      // Not all ARM processors support the needed instructions, so check here
+      // before trying to use an accelerated implementation.
+      if (SupportsArmCRC32PMULL()) {
+        return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
+            1, 1, CutoffStrategy::Unroll64CRC>();
+      } else {
+        return nullptr;
+      }
+#else
+    default:
+      // Something else, play it safe and assume slow PCLMULQDQ.
+      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
+          3, 0, CutoffStrategy::Fold3>();
+#endif
+  }
+}
+
+std::vector<std::unique_ptr<CRCImpl>> NewCRC32AcceleratedX86ARMCombinedAll() {
+  auto ret = std::vector<std::unique_ptr<CRCImpl>>();
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 0, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 1, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 2, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 3, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 0, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 1, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 2, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 3, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 0, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 1, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 2, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 3, CutoffStrategy::Fold3>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 0, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 1, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 2, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    1, 3, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 0, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 1, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 2, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    2, 3, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 0, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 1, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 2, CutoffStrategy::Unroll64CRC>>());
+  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
+                    3, 3, CutoffStrategy::Unroll64CRC>>());
+
+  return ret;
+}
+
+#else  // !ABSL_INTERNAL_CAN_USE_SIMD_CRC32C
+
+std::vector<std::unique_ptr<CRCImpl>> NewCRC32AcceleratedX86ARMCombinedAll() {
+  return std::vector<std::unique_ptr<CRCImpl>>();
+}
+
+// no hardware acceleration available
+CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { return nullptr; }
+
+#endif
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
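The large-block path above checksums several independent streams and then merges them by multiplying each partial CRC by a power of the polynomial (ComputeZeroConstant) and XORing in the next stream. The public API exposes that same combination step as absl::ConcatCrc32c, so the recombination can be sketched without the internal classes; the three-way split below simply mirrors the Fold3 strategy and is an illustrative assumption, not the internal code path.

#include <cstddef>

#include "absl/crc/crc32c.h"
#include "absl/strings/string_view.h"

// Computes the CRC32C of `data` by checksumming three independent chunks and
// concatenating the results.  For any input this returns the same value as
// absl::ComputeCrc32c(data).
absl::crc32c_t ThreeStreamCrc32c(absl::string_view data) {
  const std::size_t third = data.size() / 3;
  const absl::string_view a = data.substr(0, third);
  const absl::string_view b = data.substr(third, third);
  const absl::string_view c = data.substr(2 * third);  // includes remainder
  const absl::crc32c_t crc_a = absl::ComputeCrc32c(a);
  const absl::crc32c_t crc_b = absl::ComputeCrc32c(b);
  const absl::crc32c_t crc_c = absl::ComputeCrc32c(c);
  // Concatenation needs only the right-hand CRC and its length, which is what
  // makes the multi-stream recombination above cheap.
  const absl::crc32c_t crc_ab = absl::ConcatCrc32c(crc_a, crc_b, b.size());
  return absl::ConcatCrc32c(crc_ab, crc_c, c.size());
}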
diff --git a/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h b/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h
new file mode 100644
index 0000000..9e5ccfc
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h
@@ -0,0 +1,79 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_
+#define ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_
+
+#include "absl/base/config.h"
+
+#ifdef __aarch64__
+#include <arm_neon.h>
+
+typedef int64x2_t __m128i; /* 128-bit vector containing integers */
+#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
+#define vreinterpretq_s64_m128i(x) (x)
+
+// Guarantees that every preceding store is globally visible before any
+// subsequent store.
+// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
+static inline __attribute__((always_inline)) void _mm_sfence(void) {
+  __sync_synchronize();
+}
+
+// Load 128 bits of integer data from unaligned memory into dst. This intrinsic
+// may perform better than _mm_loadu_si128 when the data crosses a cache line
+// boundary.
+//
+//   dst[127:0] := MEM[mem_addr+127:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128
+#define _mm_lddqu_si128 _mm_loadu_si128
+
+// Loads a 128-bit value.
+// https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx
+static inline __attribute__((always_inline)) __m128i _mm_loadu_si128(
+    const __m128i *p) {
+  return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *)p));
+}
+
+// Stores the data in a to the address p without polluting the caches.  If the
+// cache line containing address p is already in the cache, the cache will be
+// updated.
+// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
+static inline __attribute__((always_inline)) void _mm_stream_si128(__m128i *p,
+                                                                   __m128i a) {
+#if ABSL_HAVE_BUILTIN(__builtin_nontemporal_store)
+  __builtin_nontemporal_store(a, p);
+#else
+  vst1q_s64((int64_t *)p, vreinterpretq_s64_m128i(a));
+#endif
+}
+
+// Sets the 16 signed 8-bit integer values.
+// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx
+static inline __attribute__((always_inline)) __m128i _mm_set_epi8(
+    signed char b15, signed char b14, signed char b13, signed char b12,
+    signed char b11, signed char b10, signed char b9, signed char b8,
+    signed char b7, signed char b6, signed char b5, signed char b4,
+    signed char b3, signed char b2, signed char b1, signed char b0) {
+  int8_t __attribute__((aligned(16)))
+  data[16] = {(int8_t)b0,  (int8_t)b1,  (int8_t)b2,  (int8_t)b3,
+              (int8_t)b4,  (int8_t)b5,  (int8_t)b6,  (int8_t)b7,
+              (int8_t)b8,  (int8_t)b9,  (int8_t)b10, (int8_t)b11,
+              (int8_t)b12, (int8_t)b13, (int8_t)b14, (int8_t)b15};
+  return (__m128i)vld1q_s8(data);
+}
+#endif  // __aarch64__
+
+#endif  // ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_
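With the shim above, copy loops written against the SSE intrinsic names compile unchanged on aarch64. A minimal usage sketch follows, assuming lengths that are multiples of 16; the helper name is illustrative and not part of the header.

#ifdef __aarch64__
#include <cstddef>

#include "absl/crc/internal/non_temporal_arm_intrinsics.h"

// Streams `len` bytes (a multiple of 16) from `src` to `dst` using the shimmed
// SSE-style intrinsics; the NEON/builtin mapping comes from the header above.
inline void StreamCopy16(void* dst, const void* src, std::size_t len) {
  __m128i* d = static_cast<__m128i*>(dst);
  const __m128i* s = static_cast<const __m128i*>(src);
  for (std::size_t i = 0; i < len / 16; ++i) {
    _mm_stream_si128(d + i, _mm_loadu_si128(s + i));
  }
  _mm_sfence();  // Make the streamed stores visible before later stores.
}
#endif  // __aarch64__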
diff --git a/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h b/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h
new file mode 100644
index 0000000..b3d94ba
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h
@@ -0,0 +1,180 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_
+#define ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
+
+#ifdef __AVX__
+#include <immintrin.h>
+#endif
+
+#ifdef __aarch64__
+#include "absl/crc/internal/non_temporal_arm_intrinsics.h"
+#endif
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+// This non-temporal memcpy copies with regular loads and non-temporal stores.
+// It is compatible with both 16-byte aligned and unaligned addresses. If the
+// data at the destination is not accessed immediately, using a non-temporal
+// memcpy can save 1 DRAM load of the destination cacheline.
+constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE;
+
+// If the objects overlap, the behavior is undefined.
+inline void *non_temporal_store_memcpy(void *__restrict dst,
+                                       const void *__restrict src, size_t len) {
+#if defined(__SSE3__) || defined(__aarch64__) || \
+    (defined(_MSC_VER) && defined(__AVX__))
+  // This implementation requires SSE3.
+  // MSVC cannot target SSE3 directly, but when MSVC targets AVX,
+  // SSE3 support is implied.
+  uint8_t *d = reinterpret_cast<uint8_t *>(dst);
+  const uint8_t *s = reinterpret_cast<const uint8_t *>(src);
+
+  // memcpy() the misaligned header. At the end of this if block, <d> is
+  // aligned to a 64-byte cacheline boundary or <len> == 0.
+  if (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1)) {
+    uintptr_t bytes_before_alignment_boundary =
+        kCacheLineSize -
+        (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1));
+    size_t header_len = (std::min)(bytes_before_alignment_boundary, len);
+    assert(bytes_before_alignment_boundary < kCacheLineSize);
+    memcpy(d, s, header_len);
+    d += header_len;
+    s += header_len;
+    len -= header_len;
+  }
+
+  if (len >= kCacheLineSize) {
+    _mm_sfence();
+    __m128i *dst_cacheline = reinterpret_cast<__m128i *>(d);
+    const __m128i *src_cacheline = reinterpret_cast<const __m128i *>(s);
+    constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m128i);
+    size_t loops = len / kCacheLineSize;
+
+    while (len >= kCacheLineSize) {
+      __m128i temp1, temp2, temp3, temp4;
+      temp1 = _mm_lddqu_si128(src_cacheline + 0);
+      temp2 = _mm_lddqu_si128(src_cacheline + 1);
+      temp3 = _mm_lddqu_si128(src_cacheline + 2);
+      temp4 = _mm_lddqu_si128(src_cacheline + 3);
+      _mm_stream_si128(dst_cacheline + 0, temp1);
+      _mm_stream_si128(dst_cacheline + 1, temp2);
+      _mm_stream_si128(dst_cacheline + 2, temp3);
+      _mm_stream_si128(dst_cacheline + 3, temp4);
+      src_cacheline += kOpsPerCacheLine;
+      dst_cacheline += kOpsPerCacheLine;
+      len -= kCacheLineSize;
+    }
+    d += loops * kCacheLineSize;
+    s += loops * kCacheLineSize;
+    _mm_sfence();
+  }
+
+  // memcpy the tail.
+  if (len) {
+    memcpy(d, s, len);
+  }
+  return dst;
+#else
+  // Fall back to regular memcpy.
+  return memcpy(dst, src, len);
+#endif  // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__)
+}
+
+inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
+                                           const void *__restrict src,
+                                           size_t len) {
+#ifdef __AVX__
+  uint8_t *d = reinterpret_cast<uint8_t *>(dst);
+  const uint8_t *s = reinterpret_cast<const uint8_t *>(src);
+
+  // memcpy() the misaligned header. At the end of this if block, <d> is
+  // aligned to a 64-byte cacheline boundary or <len> == 0.
+  if (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1)) {
+    uintptr_t bytes_before_alignment_boundary =
+        kCacheLineSize -
+        (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1));
+    size_t header_len = (std::min)(bytes_before_alignment_boundary, len);
+    assert(bytes_before_alignment_boundary < kCacheLineSize);
+    memcpy(d, s, header_len);
+    d += header_len;
+    s += header_len;
+    len -= header_len;
+  }
+
+  if (len >= kCacheLineSize) {
+    _mm_sfence();
+    __m256i *dst_cacheline = reinterpret_cast<__m256i *>(d);
+    const __m256i *src_cacheline = reinterpret_cast<const __m256i *>(s);
+    constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m256i);
+    size_t loops = len / kCacheLineSize;
+
+    while (len >= kCacheLineSize) {
+      __m256i temp1, temp2;
+      temp1 = _mm256_lddqu_si256(src_cacheline + 0);
+      temp2 = _mm256_lddqu_si256(src_cacheline + 1);
+      _mm256_stream_si256(dst_cacheline + 0, temp1);
+      _mm256_stream_si256(dst_cacheline + 1, temp2);
+      src_cacheline += kOpsPerCacheLine;
+      dst_cacheline += kOpsPerCacheLine;
+      len -= kCacheLineSize;
+    }
+    d += loops * kCacheLineSize;
+    s += loops * kCacheLineSize;
+    _mm_sfence();
+  }
+
+  // memcpy the tail.
+  if (len) {
+    memcpy(d, s, len);
+  }
+  return dst;
+#else
+  // Fall back to regular memcpy when AVX is not available.
+  return memcpy(dst, src, len);
+#endif  // __AVX__
+}
+
+}  // namespace crc_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_
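A usage sketch for the helpers above. Note that this is an Abseil-internal header, so the call is shown for illustration rather than as a supported API; on targets without SSE3/AVX or aarch64 support the helper degenerates to a plain memcpy.

#include <cstdint>
#include <vector>

#include "absl/crc/internal/non_temporal_memcpy.h"

// Copies a large buffer whose destination will not be read again soon, so its
// cache lines are not worth keeping warm.
void CopyCold(std::vector<uint8_t>& dst, const std::vector<uint8_t>& src) {
  dst.resize(src.size());
  absl::crc_internal::non_temporal_store_memcpy(dst.data(), src.data(),
                                                src.size());
}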
diff --git a/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc b/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc
new file mode 100644
index 0000000..eb07a55
--- /dev/null
+++ b/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc
@@ -0,0 +1,88 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/crc/internal/non_temporal_memcpy.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+struct TestParam {
+  size_t copy_size;
+  uint32_t src_offset;
+  uint32_t dst_offset;
+};
+
+class NonTemporalMemcpyTest : public testing::TestWithParam<TestParam> {
+ protected:
+  void SetUp() override {
+    // Make buf_size a multiple of 16 bytes.
+    size_t buf_size = ((std::max(GetParam().src_offset, GetParam().dst_offset) +
+                        GetParam().copy_size) +
+                       15) /
+                      16 * 16;
+    a_.resize(buf_size);
+    b_.resize(buf_size);
+    for (size_t i = 0; i < buf_size; i++) {
+      a_[i] = static_cast<uint8_t>(i % 256);
+      b_[i] = ~a_[i];
+    }
+  }
+
+  std::vector<uint8_t> a_, b_;
+};
+
+TEST_P(NonTemporalMemcpyTest, SSEEquality) {
+  uint8_t *src = a_.data() + GetParam().src_offset;
+  uint8_t *dst = b_.data() + GetParam().dst_offset;
+  absl::crc_internal::non_temporal_store_memcpy(dst, src, GetParam().copy_size);
+  for (size_t i = 0; i < GetParam().copy_size; i++) {
+    EXPECT_EQ(src[i], dst[i]);
+  }
+}
+
+TEST_P(NonTemporalMemcpyTest, AVXEquality) {
+  uint8_t* src = a_.data() + GetParam().src_offset;
+  uint8_t* dst = b_.data() + GetParam().dst_offset;
+
+  absl::crc_internal::non_temporal_store_memcpy_avx(dst, src,
+                                                    GetParam().copy_size);
+  for (size_t i = 0; i < GetParam().copy_size; i++) {
+    EXPECT_EQ(src[i], dst[i]);
+  }
+}
+
+// 63B is smaller than one cacheline operation, so the non-temporal routine
+// will not be called.
+// 4352B is sufficient for testing a 4092B data copy with room for offsets.
+constexpr TestParam params[] = {
+    {63, 0, 0},       {58, 5, 5},    {61, 2, 0},    {61, 0, 2},
+    {58, 5, 2},       {4096, 0, 0},  {4096, 0, 1},  {4096, 0, 2},
+    {4096, 0, 3},     {4096, 0, 4},  {4096, 0, 5},  {4096, 0, 6},
+    {4096, 0, 7},     {4096, 0, 8},  {4096, 0, 9},  {4096, 0, 10},
+    {4096, 0, 11},    {4096, 0, 12}, {4096, 0, 13}, {4096, 0, 14},
+    {4096, 0, 15},    {4096, 7, 7},  {4096, 3, 0},  {4096, 1, 0},
+    {4096, 9, 3},     {4096, 9, 11}, {8192, 0, 0},  {8192, 5, 2},
+    {1024768, 7, 11}, {1, 0, 0},     {1, 0, 1},     {1, 1, 0},
+    {1, 1, 1}};
+
+INSTANTIATE_TEST_SUITE_P(ParameterizedNonTemporalMemcpyTest,
+                         NonTemporalMemcpyTest, testing::ValuesIn(params));
+
+}  // namespace
diff --git a/abseil-cpp/absl/debugging/BUILD.bazel b/abseil-cpp/absl/debugging/BUILD.bazel
index 86faac9..42124bf 100644
--- a/abseil-cpp/absl/debugging/BUILD.bazel
+++ b/abseil-cpp/absl/debugging/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -34,8 +33,10 @@
         "internal/stacktrace_aarch64-inl.inc",
         "internal/stacktrace_arm-inl.inc",
         "internal/stacktrace_config.h",
+        "internal/stacktrace_emscripten-inl.inc",
         "internal/stacktrace_generic-inl.inc",
         "internal/stacktrace_powerpc-inl.inc",
+        "internal/stacktrace_riscv-inl.inc",
         "internal/stacktrace_unimplemented-inl.inc",
         "internal/stacktrace_win32-inl.inc",
         "internal/stacktrace_x86-inl.inc",
@@ -48,6 +49,20 @@
         ":debugging_internal",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
+        "//absl/base:raw_logging_internal",
+    ],
+)
+
+cc_test(
+    name = "stacktrace_test",
+    srcs = ["stacktrace_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":stacktrace",
+        "//absl/base:core_headers",
+        "@com_google_googletest//:gtest_main",
     ],
 )
 
@@ -57,6 +72,7 @@
         "symbolize.cc",
         "symbolize_darwin.inc",
         "symbolize_elf.inc",
+        "symbolize_emscripten.inc",
         "symbolize_unimplemented.inc",
         "symbolize_win32.inc",
     ],
@@ -66,7 +82,12 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS + select({
-        "//absl:windows": ["-DEFAULTLIB:dbghelp.lib"],
+        "//absl:msvc_compiler": ["-DEFAULTLIB:dbghelp.lib"],
+        "//absl:clang-cl_compiler": ["-DEFAULTLIB:dbghelp.lib"],
+        "//absl:mingw_compiler": [
+            "-DEFAULTLIB:dbghelp.lib",
+            "-ldbghelp",
+        ],
         "//conditions:default": [],
     }),
     deps = [
@@ -86,11 +107,13 @@
     name = "symbolize_test",
     srcs = ["symbolize_test.cc"],
     copts = ABSL_TEST_COPTS + select({
-        "//absl:windows": ["/Z7"],
+        "//absl:msvc_compiler": ["/Z7"],
+        "//absl:clang-cl_compiler": ["/Z7"],
         "//conditions:default": [],
     }),
     linkopts = ABSL_DEFAULT_LINKOPTS + select({
-        "//absl:windows": ["/DEBUG"],
+        "//absl:msvc_compiler": ["/DEBUG"],
+        "//absl:clang-cl_compiler": ["/DEBUG"],
         "//conditions:default": [],
     }),
     deps = [
@@ -99,7 +122,8 @@
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/log:check",
         "//absl/memory",
         "//absl/strings",
         "@com_google_googletest//:gtest",
@@ -116,7 +140,7 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
+    visibility = ["//absl/log/internal:__pkg__"],
     deps = [
         ":stacktrace",
         ":symbolize",
@@ -138,7 +162,6 @@
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:errno_saver",
         "//absl/base:raw_logging_internal",
     ],
 )
@@ -148,7 +171,8 @@
     srcs = ["failure_signal_handler_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
@@ -157,6 +181,7 @@
         ":stacktrace",
         ":symbolize",
         "//absl/base:raw_logging_internal",
+        "//absl/log:check",
         "//absl/strings",
         "@com_google_googletest//:gtest",
     ],
@@ -176,6 +201,7 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
@@ -190,6 +216,8 @@
     srcs = ["internal/demangle.cc"],
     hdrs = ["internal/demangle.h"],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
     deps = [
         "//absl/base",
         "//absl/base:config",
@@ -207,7 +235,7 @@
         ":stack_consumption",
         "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/memory",
         "@com_google_googletest//:gtest_main",
     ],
@@ -217,6 +245,7 @@
     name = "leak_check",
     srcs = ["leak_check.cc"],
     hdrs = ["leak_check.h"],
+    copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:config",
@@ -224,96 +253,33 @@
     ],
 )
 
-# Adding a dependency to leak_check_disable will disable
-# sanitizer leak checking (asan/lsan) in a test without
-# the need to mess around with build features.
-cc_library(
-    name = "leak_check_disable",
-    srcs = ["leak_check_disable.cc"],
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    linkstatic = 1,
-    deps = ["//absl/base:config"],
-    alwayslink = 1,
-)
-
-# These targets exists for use in tests only, explicitly configuring the
-# LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan.
-ABSL_LSAN_LINKOPTS = select({
-    "//absl:llvm_compiler": ["-fsanitize=leak"],
-    "//conditions:default": [],
-})
-
-cc_library(
-    name = "leak_check_api_enabled_for_testing",
-    testonly = 1,
-    srcs = ["leak_check.cc"],
-    hdrs = ["leak_check.h"],
-    copts = select({
-        "//absl:llvm_compiler": ["-DLEAK_SANITIZER"],
-        "//conditions:default": [],
-    }),
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
-    deps = [
-        "//absl/base:config",
-    ],
-)
-
-cc_library(
-    name = "leak_check_api_disabled_for_testing",
-    testonly = 1,
-    srcs = ["leak_check.cc"],
-    hdrs = ["leak_check.h"],
-    copts = ["-ULEAK_SANITIZER"],
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
-    deps = [
-        "//absl/base:config",
-    ],
-)
-
 cc_test(
     name = "leak_check_test",
     srcs = ["leak_check_test.cc"],
-    copts = select({
-        "//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
-        "//conditions:default": [],
-    }),
-    linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
-    tags = ["notsan"],
-    deps = [
-        ":leak_check_api_enabled_for_testing",
-        "//absl/base",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_test(
-    name = "leak_check_no_lsan_test",
-    srcs = ["leak_check_test.cc"],
-    copts = ["-UABSL_EXPECT_LEAK_SANITIZER"],
+    copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = ["noasan"],
+    tags = ["notsan"],
     deps = [
-        ":leak_check_api_disabled_for_testing",
-        "//absl/base",  # for raw_logging
+        ":leak_check",
+        "//absl/base:config",
+        "//absl/log",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
-# Test that leak checking is skipped when lsan is enabled but
-# ":leak_check_disable" is linked in.
-#
-# This test should fail in the absence of a dependency on ":leak_check_disable"
-cc_test(
-    name = "disabled_leak_check_test",
+# Binary that leaks memory and expects to fail on exit.  This isn't a
+# test that is expected to pass on its own; it exists to be called by a
+# script that checks exit status and output.
+# TODO(absl-team): Write a test to run this with a script that
+# verifies that it correctly fails.
+cc_binary(
+    name = "leak_check_fail_test_binary",
     srcs = ["leak_check_fail_test.cc"],
-    linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
-    tags = ["notsan"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
-        ":leak_check_api_enabled_for_testing",
-        ":leak_check_disable",
-        "//absl/base",
+        ":leak_check",
+        "//absl/log",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -338,10 +304,26 @@
     srcs = ["internal/stack_consumption_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["notsan"],
     deps = [
         ":stack_consumption",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "@com_google_googletest//:gtest_main",
     ],
 )
+
+cc_binary(
+    name = "stacktrace_benchmark",
+    testonly = 1,
+    srcs = ["stacktrace_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    deps = [
+        ":stacktrace",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
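The stacktrace and symbolize targets configured above are consumed through a small C++ API. A hedged sketch of typical use, assuming the standard absl/debugging headers and that absl::InitializeSymbolizer(argv[0]) has already run at startup; error handling is minimal.

#include <cstdio>

#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

// Prints the current call stack with symbolized frame names where available.
void DumpStack() {
  void* frames[32];
  // skip_count=1 drops the DumpStack frame itself.
  const int depth = absl::GetStackTrace(frames, 32, /*skip_count=*/1);
  for (int i = 0; i < depth; ++i) {
    char name[256];
    if (absl::Symbolize(frames[i], name, static_cast<int>(sizeof(name)))) {
      std::printf("  %p  %s\n", frames[i], name);
    } else {
      std::printf("  %p  (unknown)\n", frames[i]);
    }
  }
}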
diff --git a/abseil-cpp/absl/debugging/CMakeLists.txt b/abseil-cpp/absl/debugging/CMakeLists.txt
index 074b44c..65e2af8 100644
--- a/abseil-cpp/absl/debugging/CMakeLists.txt
+++ b/abseil-cpp/absl/debugging/CMakeLists.txt
@@ -14,6 +14,8 @@
 # limitations under the License.
 #
 
+find_library(EXECINFO_LIBRARY execinfo)
+
 absl_cc_library(
   NAME
     stacktrace
@@ -22,8 +24,10 @@
     "internal/stacktrace_aarch64-inl.inc"
     "internal/stacktrace_arm-inl.inc"
     "internal/stacktrace_config.h"
+    "internal/stacktrace_emscripten-inl.inc"
     "internal/stacktrace_generic-inl.inc"
     "internal/stacktrace_powerpc-inl.inc"
+    "internal/stacktrace_riscv-inl.inc"
     "internal/stacktrace_unimplemented-inl.inc"
     "internal/stacktrace_win32-inl.inc"
     "internal/stacktrace_x86-inl.inc"
@@ -31,13 +35,30 @@
     "stacktrace.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    $<$<BOOL:${EXECINFO_LIBRARY}>:${EXECINFO_LIBRARY}>
   DEPS
     absl::debugging_internal
     absl::config
     absl::core_headers
+    absl::dynamic_annotations
+    absl::raw_logging_internal
   PUBLIC
 )
 
+absl_cc_test(
+  NAME
+    stacktrace_test
+  SRCS
+    "stacktrace_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::stacktrace
+    absl::core_headers
+    GTest::gmock_main
+)
+
 absl_cc_library(
   NAME
     symbolize
@@ -48,13 +69,14 @@
     "symbolize.cc"
     "symbolize_darwin.inc"
     "symbolize_elf.inc"
+    "symbolize_emscripten.inc"
     "symbolize_unimplemented.inc"
     "symbolize_win32.inc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
-    $<$<BOOL:${MINGW}>:"dbghelp">
+    $<$<BOOL:${MINGW}>:-ldbghelp>
   DEPS
     absl::debugging_internal
     absl::demangle_internal
@@ -79,17 +101,19 @@
   LINKOPTS
     $<$<BOOL:${MSVC}>:-DEBUG>
   DEPS
-    absl::stack_consumption
-    absl::symbolize
     absl::base
+    absl::check
     absl::config
     absl::core_headers
+    absl::log
     absl::memory
-    absl::raw_logging_internal
+    absl::stack_consumption
     absl::strings
-    gmock
+    absl::symbolize
+    GTest::gmock
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     examine_stack
@@ -122,7 +146,6 @@
     absl::base
     absl::config
     absl::core_headers
-    absl::errno_saver
     absl::raw_logging_internal
   PUBLIC
 )
@@ -135,15 +158,17 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::check
     absl::failure_signal_handler
     absl::stacktrace
     absl::symbolize
     absl::strings
     absl::raw_logging_internal
     Threads::Threads
-    gmock
+    GTest::gmock
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     debugging_internal
@@ -165,6 +190,7 @@
     absl::raw_logging_internal
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     demangle_internal
@@ -192,9 +218,9 @@
     absl::stack_consumption
     absl::config
     absl::core_headers
+    absl::log
     absl::memory
-    absl::raw_logging_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -212,42 +238,6 @@
   PUBLIC
 )
 
-absl_cc_library(
-  NAME
-    leak_check_disable
-  SRCS
-    "leak_check_disable.cc"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  PUBLIC
-)
-
-absl_cc_library(
-  NAME
-    leak_check_api_enabled_for_testing
-  HDRS
-    "leak_check.h"
-  SRCS
-    "leak_check.cc"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-    $<$<BOOL:${ABSL_HAVE_LSAN}>:-DLEAK_SANITIZER>
-  TESTONLY
-)
-
-absl_cc_library(
-  NAME
-    leak_check_api_disabled_for_testing
-  HDRS
-    "leak_check.h"
-  SRCS
-    "leak_check.cc"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-    "-ULEAK_SANITIZER"
-  TESTONLY
-)
-
 absl_cc_test(
   NAME
     leak_check_test
@@ -255,46 +245,16 @@
     "leak_check_test.cc"
   COPTS
     ${ABSL_TEST_COPTS}
-    "$<$<BOOL:${ABSL_HAVE_LSAN}>:-DABSL_EXPECT_LEAK_SANITIZER>"
   LINKOPTS
-    "${ABSL_LSAN_LINKOPTS}"
+    ${ABSL_DEFAULT_LINKOPTS}
   DEPS
-    absl::leak_check_api_enabled_for_testing
+    absl::leak_check
     absl::base
-    gmock_main
+    absl::log
+    GTest::gmock_main
 )
 
-absl_cc_test(
-  NAME
-    leak_check_no_lsan_test
-  SRCS
-    "leak_check_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-    "-UABSL_EXPECT_LEAK_SANITIZER"
-  DEPS
-    absl::leak_check_api_disabled_for_testing
-    absl::base
-    gmock_main
-)
-
-absl_cc_test(
-  NAME
-    disabled_leak_check_test
-  SRCS
-    "leak_check_fail_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  LINKOPTS
-    "${ABSL_LSAN_LINKOPTS}"
-  DEPS
-    absl::leak_check_api_enabled_for_testing
-    absl::leak_check_disable
-    absl::base
-    absl::raw_logging_internal
-    gmock_main
-)
-
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     stack_consumption
@@ -321,8 +281,8 @@
   DEPS
     absl::stack_consumption
     absl::core_headers
-    absl::raw_logging_internal
-    gmock_main
+    absl::log
+    GTest::gmock_main
 )
 
 # component target
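The leak_check targets consolidated above back the public API in absl/debugging/leak_check.h. A small hedged sketch of how that API is typically used; when no leak sanitizer is linked in, these calls are no-ops.

#include "absl/debugging/leak_check.h"

// Allocates an object that is intentionally never freed (for example a
// process-wide singleton) without tripping LeakSanitizer.
int* MakeIntentionalSingleton() {
  absl::LeakCheckDisabler disabler;  // Suppresses leak checking in this scope.
  return new int(42);
}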
diff --git a/abseil-cpp/absl/debugging/failure_signal_handler.cc b/abseil-cpp/absl/debugging/failure_signal_handler.cc
index 5d13bdb..992c89c 100644
--- a/abseil-cpp/absl/debugging/failure_signal_handler.cc
+++ b/abseil-cpp/absl/debugging/failure_signal_handler.cc
@@ -21,6 +21,7 @@
 #ifdef _WIN32
 #include <windows.h>
 #else
+#include <sched.h>
 #include <unistd.h>
 #endif
 
@@ -30,6 +31,13 @@
 
 #ifdef ABSL_HAVE_MMAP
 #include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
+
+#ifdef __linux__
+#include <sys/prctl.h>
 #endif
 
 #include <algorithm>
@@ -41,7 +49,6 @@
 #include <ctime>
 
 #include "absl/base/attributes.h"
-#include "absl/base/internal/errno_saver.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/sysinfo.h"
 #include "absl/debugging/internal/examine_stack.h"
@@ -50,8 +57,10 @@
 #ifndef _WIN32
 #define ABSL_HAVE_SIGACTION
 // Apple WatchOS and TVOS don't allow sigaltstack
-#if !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \
-    !(defined(TARGET_OS_TV) && TARGET_OS_TV)
+// Apple macOS has sigaltstack, but using it makes backtrace() unusable.
+#if !(defined(TARGET_OS_OSX) && TARGET_OS_OSX) &&     \
+    !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \
+    !(defined(TARGET_OS_TV) && TARGET_OS_TV) && !defined(__QNX__)
 #define ABSL_HAVE_SIGALTSTACK
 #endif
 #endif
@@ -75,10 +84,10 @@
   struct sigaction previous_action;
   // StructSigaction is used to silence -Wmissing-field-initializers.
   using StructSigaction = struct sigaction;
-  #define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
+#define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
 #else
   void (*previous_handler)(int);
-  #define FSD_PREVIOUS_INIT SIG_DFL
+#define FSD_PREVIOUS_INIT SIG_DFL
 #endif
 };
 
@@ -130,12 +139,14 @@
 #ifdef ABSL_HAVE_SIGALTSTACK
 
 static bool SetupAlternateStackOnce() {
-#if defined(__wasm__) || defined (__asjms__)
+#if defined(__wasm__) || defined(__asjms__)
   const size_t page_mask = getpagesize() - 1;
 #else
-  const size_t page_mask = sysconf(_SC_PAGESIZE) - 1;
+  const size_t page_mask = static_cast<size_t>(sysconf(_SC_PAGESIZE)) - 1;
 #endif
-  size_t stack_size = (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask;
+  size_t stack_size =
+      (std::max(static_cast<size_t>(SIGSTKSZ), size_t{65536}) + page_mask) &
+      ~page_mask;
 #if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
     defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
   // Account for sanitizer instrumentation requiring additional stack space.
@@ -150,9 +161,6 @@
 #ifndef MAP_STACK
 #define MAP_STACK 0
 #endif
-#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
   sigstk.ss_sp = mmap(nullptr, sigstk.ss_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
   if (sigstk.ss_sp == MAP_FAILED) {
@@ -168,6 +176,20 @@
   if (sigaltstack(&sigstk, nullptr) != 0) {
     ABSL_RAW_LOG(FATAL, "sigaltstack() failed with errno=%d", errno);
   }
+
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+  // Make a best-effort attempt to name the allocated region in
+  // /proc/$PID/smaps.
+  //
+  // The call to prctl() may fail if the kernel was not configured with the
+  // CONFIG_ANON_VMA_NAME kernel option.  This is OK since the call is
+  // primarily a debugging aid.
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, sigstk.ss_sp, sigstk.ss_size,
+        "absl-signalstack");
+#endif
+#endif  // __linux__
+
   return true;
 }
 
@@ -214,22 +236,24 @@
 
 #endif
 
-static void WriteToStderr(const char* data) {
-  absl::base_internal::ErrnoSaver errno_saver;
-  absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
-}
-
-static void WriteSignalMessage(int signo, void (*writerfn)(const char*)) {
-  char buf[64];
+static void WriteSignalMessage(int signo, int cpu,
+                               void (*writerfn)(const char*)) {
+  char buf[96];
+  char on_cpu[32] = {0};
+  if (cpu != -1) {
+    snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu);
+  }
   const char* const signal_string =
       debugging_internal::FailureSignalToString(signo);
   if (signal_string != nullptr && signal_string[0] != '\0') {
-    snprintf(buf, sizeof(buf), "*** %s received at time=%ld ***\n",
+    snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
              signal_string,
-             static_cast<long>(time(nullptr)));  // NOLINT(runtime/int)
+             static_cast<long>(time(nullptr)),  // NOLINT(runtime/int)
+             on_cpu);
   } else {
-    snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld ***\n",
-             signo, static_cast<long>(time(nullptr)));  // NOLINT(runtime/int)
+    snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
+             signo, static_cast<long>(time(nullptr)),  // NOLINT(runtime/int)
+             on_cpu);
   }
   writerfn(buf);
 }
@@ -269,10 +293,10 @@
 // Called by AbslFailureSignalHandler() to write the failure info. It is
 // called once with writerfn set to WriteToStderr() and then possibly
 // with writerfn set to the user provided function.
-static void WriteFailureInfo(int signo, void* ucontext,
+static void WriteFailureInfo(int signo, void* ucontext, int cpu,
                              void (*writerfn)(const char*)) {
   WriterFnStruct writerfn_struct{writerfn};
-  WriteSignalMessage(signo, writerfn);
+  WriteSignalMessage(signo, cpu, writerfn);
   WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper,
                   &writerfn_struct);
 }
@@ -282,12 +306,13 @@
 // some platforms.
 static void PortableSleepForSeconds(int seconds) {
 #ifdef _WIN32
-  Sleep(seconds * 1000);
+  Sleep(static_cast<DWORD>(seconds * 1000));
 #else
   struct timespec sleep_time;
   sleep_time.tv_sec = seconds;
   sleep_time.tv_nsec = 0;
-  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {}
+  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
+  }
 #endif
 }
 
@@ -297,9 +322,7 @@
 // set amount of time. If AbslFailureSignalHandler() hangs for more than
 // the alarm timeout, ImmediateAbortSignalHandler() will abort the
 // program.
-static void ImmediateAbortSignalHandler(int) {
-  RaiseToDefaultHandler(SIGABRT);
-}
+static void ImmediateAbortSignalHandler(int) { RaiseToDefaultHandler(SIGABRT); }
 #endif
 
 // absl::base_internal::GetTID() returns pid_t on most platforms, but
@@ -316,9 +339,9 @@
 
   const GetTidType this_tid = absl::base_internal::GetTID();
   GetTidType previous_failed_tid = 0;
-  if (!failed_tid.compare_exchange_strong(
-          previous_failed_tid, static_cast<intptr_t>(this_tid),
-          std::memory_order_acq_rel, std::memory_order_relaxed)) {
+  if (!failed_tid.compare_exchange_strong(previous_failed_tid, this_tid,
+                                          std::memory_order_acq_rel,
+                                          std::memory_order_relaxed)) {
     ABSL_RAW_LOG(
         ERROR,
         "Signal %d raised at PC=%p while already in AbslFailureSignalHandler()",
@@ -334,22 +357,34 @@
     }
   }
 
+  // Increase the chance that the CPU we report was the same CPU on which the
+  // signal was received by doing this as early as possible, i.e. after
+  // verifying that this is not a recursive signal handler invocation.
+  int my_cpu = -1;
+#ifdef ABSL_HAVE_SCHED_GETCPU
+  my_cpu = sched_getcpu();
+#endif
+
 #ifdef ABSL_HAVE_ALARM
   // Set an alarm to abort the program in case this code hangs or deadlocks.
   if (fsh_options.alarm_on_failure_secs > 0) {
     alarm(0);  // Cancel any existing alarms.
     signal(SIGALRM, ImmediateAbortSignalHandler);
-    alarm(fsh_options.alarm_on_failure_secs);
+    alarm(static_cast<unsigned int>(fsh_options.alarm_on_failure_secs));
   }
 #endif
 
   // First write to stderr.
-  WriteFailureInfo(signo, ucontext, WriteToStderr);
+  WriteFailureInfo(
+      signo, ucontext, my_cpu, +[](const char* data) {
+        absl::raw_log_internal::AsyncSignalSafeWriteError(data, strlen(data));
+      });
 
   // Riskier code (because it is less likely to be async-signal-safe)
   // goes after this point.
   if (fsh_options.writerfn != nullptr) {
-    WriteFailureInfo(signo, ucontext, fsh_options.writerfn);
+    WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn);
+    fsh_options.writerfn(nullptr);
   }
 
   if (fsh_options.call_previous_handler) {
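
The prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) call added above labels the mmap'ed alternate signal stack so it shows up by name in /proc/<pid>/smaps. A standalone sketch of that technique, separate from the handler code (Linux-only; the region name and helper are illustrative):

    // Sketch of naming an anonymous mapping the way SetupAlternateStackOnce()
    // now names the signal stack. The prctl() fails harmlessly on kernels
    // built without CONFIG_ANON_VMA_NAME.
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <cstddef>

    void* MapNamedRegion(std::size_t size) {
      void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return nullptr;
    #if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Best effort: the region appears as "[anon:demo-region]" in smaps.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "demo-region");
    #endif
      return p;
    }
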
diff --git a/abseil-cpp/absl/debugging/failure_signal_handler.h b/abseil-cpp/absl/debugging/failure_signal_handler.h
index 0c0f585..5e03478 100644
--- a/abseil-cpp/absl/debugging/failure_signal_handler.h
+++ b/abseil-cpp/absl/debugging/failure_signal_handler.h
@@ -62,7 +62,7 @@
   // If true, try to run signal handlers on an alternate stack (if supported on
   // the given platform). An alternate stack is useful for program crashes due
   // to a stack overflow; by running on a alternate stack, the signal handler
-  // may run even when normal stack space has been exausted. The downside of
+  // may run even when normal stack space has been exhausted. The downside of
   // using an alternate stack is that extra memory for the alternate stack needs
   // to be pre-allocated.
   bool use_alternate_stack = true;
@@ -90,7 +90,7 @@
   // If non-null, indicates a pointer to a callback function that will be called
   // upon failure, with a string argument containing failure data. This function
   // may be used as a hook to write failure data to a secondary location, such
-  // as a log file. This function may also be called with null data, as a hint
+  // as a log file. This function will also be called with null data, as a hint
   // to flush any buffered data before the program may be terminated. Consider
   // flushing any buffered data in all calls to this function.
   //
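
The comment fix above strengthens the writerfn contract: the callback is now guaranteed to receive a final nullptr call as a flush hint. A hedged sketch of a writerfn honoring that contract; the options and installer calls mirror the test below, while the log-file handling is illustrative:

    // Sketch: buffer failure output to a FILE* and flush on the nullptr hint.
    #include <cstdio>
    #include "absl/debugging/failure_signal_handler.h"
    #include "absl/debugging/symbolize.h"

    static FILE* g_crash_log = nullptr;

    static void WriteCrashData(const char* data) {
      if (data == nullptr) {  // Flush hint: the program may terminate next.
        if (g_crash_log != nullptr) fflush(g_crash_log);
        return;
      }
      if (g_crash_log != nullptr) fputs(data, g_crash_log);
    }

    int main(int argc, char** argv) {
      absl::InitializeSymbolizer(argv[0]);
      g_crash_log = fopen("/tmp/crash.log", "w");
      absl::FailureSignalHandlerOptions options;
      options.writerfn = WriteCrashData;
      absl::InstallFailureSignalHandler(options);
      // ... rest of the program ...
      return 0;
    }
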
diff --git a/abseil-cpp/absl/debugging/failure_signal_handler_test.cc b/abseil-cpp/absl/debugging/failure_signal_handler_test.cc
index d8283b2..72816a3 100644
--- a/abseil-cpp/absl/debugging/failure_signal_handler_test.cc
+++ b/abseil-cpp/absl/debugging/failure_signal_handler_test.cc
@@ -22,11 +22,12 @@
 #include <cstring>
 #include <fstream>
 
-#include "gtest/gtest.h"
 #include "gmock/gmock.h"
+#include "gtest/gtest.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/debugging/symbolize.h"
+#include "absl/log/check.h"
 #include "absl/strings/match.h"
 #include "absl/strings/str_cat.h"
 
@@ -87,7 +88,7 @@
 // This function runs in a fork()ed process on most systems.
 void InstallHandlerWithWriteToFileAndRaise(const char* file, int signo) {
   error_file = fopen(file, "w");
-  ABSL_RAW_CHECK(error_file != nullptr, "Failed create error_file");
+  CHECK_NE(error_file, nullptr) << "Failed create error_file";
   absl::FailureSignalHandlerOptions options;
   options.writerfn = WriteToErrorFile;
   absl::InstallFailureSignalHandler(options);
@@ -122,6 +123,12 @@
           "*** ", absl::debugging_internal::FailureSignalToString(signo),
           " received at ")));
 
+  // On platforms where it is possible to get the current CPU, the
+  // CPU number is also logged. Check that it is present in output.
+#if defined(__linux__)
+  EXPECT_THAT(error_line, testing::HasSubstr(" on cpu "));
+#endif
+
   if (absl::debugging_internal::StackTraceWorksForTest()) {
     std::getline(error_output, error_line);
     EXPECT_THAT(error_line, StartsWith("PC: "));
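
The " on cpu " substring asserted above is produced because the handler now calls sched_getcpu() as early as possible after ruling out a recursive invocation. A tiny standalone sketch of that probe (the handler guards it with ABSL_HAVE_SCHED_GETCPU; this sketch simply assumes Linux):

    // Minimal sched_getcpu() probe behind the " on cpu N" text; -1 means the
    // CPU could not be determined.
    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <sched.h>
    #include <cstdio>

    int main() {
      int cpu = -1;
    #if defined(__linux__)
      cpu = sched_getcpu();
    #endif
      std::printf("running on cpu %d\n", cpu);
      return 0;
    }
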
diff --git a/abseil-cpp/absl/debugging/internal/address_is_readable.cc b/abseil-cpp/absl/debugging/internal/address_is_readable.cc
index 329c285..91eaa76 100644
--- a/abseil-cpp/absl/debugging/internal/address_is_readable.cc
+++ b/abseil-cpp/absl/debugging/internal/address_is_readable.cc
@@ -30,16 +30,12 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#else
+#else  // __linux__ && !__ANDROID__
 
-#include <fcntl.h>
-#include <sys/syscall.h>
+#include <stdint.h>
+#include <syscall.h>
 #include <unistd.h>
 
-#include <atomic>
-#include <cerrno>
-#include <cstdint>
-
 #include "absl/base/internal/errno_saver.h"
 #include "absl/base/internal/raw_logging.h"
 
@@ -47,93 +43,54 @@
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
 
-// Pack a pid and two file descriptors into a 64-bit word,
-// using 16, 24, and 24 bits for each respectively.
-static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
-  ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
-                 "fd out of range");
-  return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
-}
-
-// Unpack x into a pid and two file descriptors, where x was created with
-// Pack().
-static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
-  *pid = x >> 48;
-  *read_fd = (x >> 24) & 0xffffff;
-  *write_fd = x & 0xffffff;
-}
-
-// Return whether the byte at *addr is readable, without faulting.
-// Save and restores errno.   Returns true on systems where
-// unimplemented.
-// This is a namespace-scoped variable for correct zero-initialization.
-static std::atomic<uint64_t> pid_and_fds;  // initially 0, an invalid pid.
-
+// NOTE: be extra careful about adding any interposable function calls here
+// (such as open(), read(), etc.). These symbols may be interposed and will get
+// invoked in contexts they don't expect.
+//
+// NOTE: any new system calls here may also require sandbox reconfiguration.
+//
 bool AddressIsReadable(const void *addr) {
+  // Align address on 8-byte boundary. On aarch64, checking last
+  // byte before inaccessible page returned unexpected EFAULT.
+  const uintptr_t u_addr = reinterpret_cast<uintptr_t>(addr) & ~uintptr_t{7};
+  addr = reinterpret_cast<const void *>(u_addr);
+
+  // rt_sigprocmask below will succeed for this input.
+  if (addr == nullptr) return false;
+
   absl::base_internal::ErrnoSaver errno_saver;
-  // We test whether a byte is readable by using write().  Normally, this would
-  // be done via a cached file descriptor to /dev/null, but linux fails to
-  // check whether the byte is readable when the destination is /dev/null, so
-  // we use a cached pipe.  We store the pid of the process that created the
-  // pipe to handle the case where a process forks, and the child closes all
-  // the file descriptors and then calls this routine.  This is not perfect:
-  // the child could use the routine, then close all file descriptors and then
-  // use this routine again.  But the likely use of this routine is when
-  // crashing, to test the validity of pages when dumping the stack.  Beware
-  // that we may leak file descriptors, but we're unlikely to leak many.
-  int bytes_written;
-  int current_pid = getpid() & 0xffff;   // we use only the low order 16 bits
-  do {  // until we do not get EBADF trying to use file descriptors
-    int pid;
-    int read_fd;
-    int write_fd;
-    uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
-    Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
-    while (current_pid != pid) {
-      int p[2];
-      // new pipe
-      if (pipe(p) != 0) {
-        ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
-      }
-      fcntl(p[0], F_SETFD, FD_CLOEXEC);
-      fcntl(p[1], F_SETFD, FD_CLOEXEC);
-      uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
-      if (pid_and_fds.compare_exchange_strong(
-              local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
-              std::memory_order_relaxed)) {
-        local_pid_and_fds = new_pid_and_fds;  // fds exposed to other threads
-      } else {  // fds not exposed to other threads; we can close them.
-        close(p[0]);
-        close(p[1]);
-        local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
-      }
-      Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
-    }
-    errno = 0;
-    // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
-    // and other checkers from complaining about accesses to arbitrary
-    // memory.
-    do {
-      bytes_written = syscall(SYS_write, write_fd, addr, 1);
-    } while (bytes_written == -1 && errno == EINTR);
-    if (bytes_written == 1) {   // remove the byte from the pipe
-      char c;
-      while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
-      }
-    }
-    if (errno == EBADF) {  // Descriptors invalid.
-      // If pid_and_fds contains the problematic file descriptors we just used,
-      // this call will forget them, and the loop will try again.
-      pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
-                                          std::memory_order_release,
-                                          std::memory_order_relaxed);
-    }
-  } while (errno == EBADF);
-  return bytes_written == 1;
+
+  // Here we probe with some syscall which
+  // - accepts an 8-byte region of user memory as input
+  // - tests for EFAULT before other validation
+  // - has no problematic side-effects
+  //
+  // rt_sigprocmask(2) works for this.  It copies sizeof(kernel_sigset_t)==8
+  // bytes from the address into the kernel memory before any validation.
+  //
+  // The call can never succeed, since the `how` parameter is not one of
+  // SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK.
+  //
+  // This strategy depends on Linux implementation details,
+  // so we rely on the test to alert us if it stops working.
+  //
+  // Some discarded past approaches:
+  // - msync() doesn't reject PROT_NONE regions
+  // - write() on /dev/null doesn't return EFAULT
+  // - write() on a pipe requires creating it and draining the writes
+  // - connect() works but is problematic for sandboxes and needs a valid
+  //   file descriptor
+  //
+  // This can never succeed (invalid first argument to sigprocmask).
+  ABSL_RAW_CHECK(syscall(SYS_rt_sigprocmask, ~0, addr, nullptr,
+                         /*sizeof(kernel_sigset_t)*/ 8) == -1,
+                 "unexpected success");
+  ABSL_RAW_CHECK(errno == EFAULT || errno == EINVAL, "unexpected errno");
+  return errno != EFAULT;
 }
 
 }  // namespace debugging_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif
+#endif  // __linux__ && !__ANDROID__
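
The rewritten AddressIsReadable() above probes memory with a deliberately invalid rt_sigprocmask(2) call, treating EFAULT as "unreadable" and EINVAL as "readable". An illustrative check of that behavior against a PROT_NONE page (Linux-only; the test scaffolding is an assumption, not part of the patch):

    // Illustrative check: AddressIsReadable() should reject a PROT_NONE page
    // and accept ordinary readable memory.
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstddef>
    #include "absl/debugging/internal/address_is_readable.h"

    int main() {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      void* blocked = mmap(nullptr, page, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(blocked != MAP_FAILED);

      int readable = 0;
      assert(absl::debugging_internal::AddressIsReadable(&readable));
      assert(!absl::debugging_internal::AddressIsReadable(blocked));

      munmap(blocked, page);
      return 0;
    }
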
diff --git a/abseil-cpp/absl/debugging/internal/demangle.cc b/abseil-cpp/absl/debugging/internal/demangle.cc
index 46cdb67..f283291 100644
--- a/abseil-cpp/absl/debugging/internal/demangle.cc
+++ b/abseil-cpp/absl/debugging/internal/demangle.cc
@@ -151,12 +151,12 @@
 // State needed for demangling.  This struct is copied in almost every stack
 // frame, so every byte counts.
 typedef struct {
-  int mangled_idx;                   // Cursor of mangled name.
-  int out_cur_idx;                   // Cursor of output string.
-  int prev_name_idx;                 // For constructors/destructors.
-  signed int prev_name_length : 16;  // For constructors/destructors.
-  signed int nest_level : 15;        // For nested names.
-  unsigned int append : 1;           // Append flag.
+  int mangled_idx;                     // Cursor of mangled name.
+  int out_cur_idx;                     // Cursor of output string.
+  int prev_name_idx;                   // For constructors/destructors.
+  unsigned int prev_name_length : 16;  // For constructors/destructors.
+  signed int nest_level : 15;          // For nested names.
+  unsigned int append : 1;             // Append flag.
   // Note: for some reason MSVC can't pack "bool append : 1" into the same int
   // with the above two fields, so we use an int instead.  Amusingly it can pack
   // "signed bool" as expected, but relying on that to continue to be a legal
@@ -235,8 +235,8 @@
 }
 
 // Returns true if "str" has at least "n" characters remaining.
-static bool AtLeastNumCharsRemaining(const char *str, int n) {
-  for (int i = 0; i < n; ++i) {
+static bool AtLeastNumCharsRemaining(const char *str, size_t n) {
+  for (size_t i = 0; i < n; ++i) {
     if (str[i] == '\0') {
       return false;
     }
@@ -253,18 +253,20 @@
   return prefix[i] == '\0';  // Consumed everything in "prefix".
 }
 
-static void InitState(State *state, const char *mangled, char *out,
-                      int out_size) {
+static void InitState(State* state,
+                      const char* mangled,
+                      char* out,
+                      size_t out_size) {
   state->mangled_begin = mangled;
   state->out = out;
-  state->out_end_idx = out_size;
+  state->out_end_idx = static_cast<int>(out_size);
   state->recursion_depth = 0;
   state->steps = 0;
 
   state->parse_state.mangled_idx = 0;
   state->parse_state.out_cur_idx = 0;
   state->parse_state.prev_name_idx = 0;
-  state->parse_state.prev_name_length = -1;
+  state->parse_state.prev_name_length = 0;
   state->parse_state.nest_level = -1;
   state->parse_state.append = true;
 }
@@ -356,8 +358,8 @@
 // Append "str" at "out_cur_idx".  If there is an overflow, out_cur_idx is
 // set to out_end_idx+1.  The output string is ensured to
 // always terminate with '\0' as long as there is no overflow.
-static void Append(State *state, const char *const str, const int length) {
-  for (int i = 0; i < length; ++i) {
+static void Append(State *state, const char *const str, const size_t length) {
+  for (size_t i = 0; i < length; ++i) {
     if (state->parse_state.out_cur_idx + 1 <
         state->out_end_idx) {  // +1 for '\0'
       state->out[state->parse_state.out_cur_idx++] = str[i];
@@ -386,24 +388,28 @@
 // by GCC 4.5.x and later versions (and our locally-modified version of GCC
 // 4.4.x) to indicate functions which have been cloned during optimization.
 // We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
+// Additionally, '_' is allowed along with the alphanumeric sequence.
 static bool IsFunctionCloneSuffix(const char *str) {
   size_t i = 0;
   while (str[i] != '\0') {
-    // Consume a single .<alpha>+.<digit>+ sequence.
-    if (str[i] != '.' || !IsAlpha(str[i + 1])) {
+    bool parsed = false;
+    // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
+    if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
+      parsed = true;
+      i += 2;
+      while (IsAlpha(str[i]) || str[i] == '_') {
+        ++i;
+      }
+    }
+    if (str[i] == '.' && IsDigit(str[i + 1])) {
+      parsed = true;
+      i += 2;
+      while (IsDigit(str[i])) {
+        ++i;
+      }
+    }
+    if (!parsed)
       return false;
-    }
-    i += 2;
-    while (IsAlpha(str[i])) {
-      ++i;
-    }
-    if (str[i] != '.' || !IsDigit(str[i + 1])) {
-      return false;
-    }
-    i += 2;
-    while (IsDigit(str[i])) {
-      ++i;
-    }
   }
   return true;  // Consumed everything in "str".
 }
@@ -416,7 +422,7 @@
 
 // Append "str" with some tweaks, iff "append" state is true.
 static void MaybeAppendWithLength(State *state, const char *const str,
-                                  const int length) {
+                                  const size_t length) {
   if (state->parse_state.append && length > 0) {
     // Append a space if the output buffer ends with '<' and "str"
     // starts with '<' to avoid <<<.
@@ -428,14 +434,14 @@
     if (state->parse_state.out_cur_idx < state->out_end_idx &&
         (IsAlpha(str[0]) || str[0] == '_')) {
       state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
-      state->parse_state.prev_name_length = length;
+      state->parse_state.prev_name_length = static_cast<unsigned int>(length);
     }
     Append(state, str, length);
   }
 }
 
 // Appends a positive decimal number to the output if appending is enabled.
-static bool MaybeAppendDecimal(State *state, unsigned int val) {
+static bool MaybeAppendDecimal(State *state, int val) {
   // Max {32-64}-bit unsigned int is 20 digits.
   constexpr size_t kMaxLength = 20;
   char buf[kMaxLength];
@@ -447,12 +453,12 @@
     // one-past-the-end and manipulate one character before the pointer.
     char *p = &buf[kMaxLength];
     do {  // val=0 is the only input that should write a leading zero digit.
-      *--p = (val % 10) + '0';
+      *--p = static_cast<char>((val % 10) + '0');
       val /= 10;
     } while (p > buf && val != 0);
 
     // 'p' landed on the last character we set.  How convenient.
-    Append(state, p, kMaxLength - (p - buf));
+    Append(state, p, kMaxLength - static_cast<size_t>(p - buf));
   }
 
   return true;
@@ -462,7 +468,7 @@
 // Returns true so that it can be placed in "if" conditions.
 static bool MaybeAppend(State *state, const char *const str) {
   if (state->parse_state.append) {
-    int length = StrLen(str);
+    size_t length = StrLen(str);
     MaybeAppendWithLength(state, str, length);
   }
   return true;
@@ -517,10 +523,10 @@
 
 // Returns true if the identifier of the given length pointed to by
 // "mangled_cur" is anonymous namespace.
-static bool IdentifierIsAnonymousNamespace(State *state, int length) {
+static bool IdentifierIsAnonymousNamespace(State *state, size_t length) {
   // Returns true if "anon_prefix" is a proper prefix of "mangled_cur".
   static const char anon_prefix[] = "_GLOBAL__N_";
-  return (length > static_cast<int>(sizeof(anon_prefix) - 1) &&
+  return (length > (sizeof(anon_prefix) - 1) &&
           StrPrefix(RemainingInput(state), anon_prefix));
 }
 
@@ -538,12 +544,13 @@
 static bool ParseNumber(State *state, int *number_out);
 static bool ParseFloatNumber(State *state);
 static bool ParseSeqId(State *state);
-static bool ParseIdentifier(State *state, int length);
+static bool ParseIdentifier(State *state, size_t length);
 static bool ParseOperatorName(State *state, int *arity);
 static bool ParseSpecialName(State *state);
 static bool ParseCallOffset(State *state);
 static bool ParseNVOffset(State *state);
 static bool ParseVOffset(State *state);
+static bool ParseAbiTags(State *state);
 static bool ParseCtorDtorName(State *state);
 static bool ParseDecltype(State *state);
 static bool ParseType(State *state);
@@ -597,7 +604,7 @@
 //
 // Reference:
 // - Itanium C++ ABI
-//   <https://mentorembedded.github.io/cxx-abi/abi.html#mangling>
+//   <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
 
 // <mangled-name> ::= _Z <encoding>
 static bool ParseMangledName(State *state) {
@@ -737,17 +744,42 @@
   return true;
 }
 
-// <unqualified-name> ::= <operator-name>
-//                    ::= <ctor-dtor-name>
-//                    ::= <source-name>
-//                    ::= <local-source-name> // GCC extension; see below.
-//                    ::= <unnamed-type-name>
+// <unqualified-name> ::= <operator-name> [<abi-tags>]
+//                    ::= <ctor-dtor-name> [<abi-tags>]
+//                    ::= <source-name> [<abi-tags>]
+//                    ::= <local-source-name> [<abi-tags>]
+//                    ::= <unnamed-type-name> [<abi-tags>]
+//
+// <local-source-name> is a GCC extension; see below.
 static bool ParseUnqualifiedName(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
-  return (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
-          ParseSourceName(state) || ParseLocalSourceName(state) ||
-          ParseUnnamedTypeName(state));
+  if (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
+      ParseSourceName(state) || ParseLocalSourceName(state) ||
+      ParseUnnamedTypeName(state)) {
+    return ParseAbiTags(state);
+  }
+  return false;
+}
+
+// <abi-tags> ::= <abi-tag> [<abi-tags>]
+// <abi-tag>  ::= B <source-name>
+static bool ParseAbiTags(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+
+  while (ParseOneCharToken(state, 'B')) {
+    ParseState copy = state->parse_state;
+    MaybeAppend(state, "[abi:");
+
+    if (!ParseSourceName(state)) {
+      state->parse_state = copy;
+      return false;
+    }
+    MaybeAppend(state, "]");
+  }
+
+  return true;
 }
 
 // <source-name> ::= <positive length number> <identifier>
@@ -756,7 +788,8 @@
   if (guard.IsTooComplex()) return false;
   ParseState copy = state->parse_state;
   int length = -1;
-  if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
+  if (ParseNumber(state, &length) &&
+      ParseIdentifier(state, static_cast<size_t>(length))) {
     return true;
   }
   state->parse_state = copy;
@@ -834,7 +867,7 @@
   uint64_t number = 0;
   for (; *p != '\0'; ++p) {
     if (IsDigit(*p)) {
-      number = number * 10 + (*p - '0');
+      number = number * 10 + static_cast<uint64_t>(*p - '0');
     } else {
       break;
     }
@@ -849,7 +882,7 @@
     state->parse_state.mangled_idx += p - RemainingInput(state);
     if (number_out != nullptr) {
       // Note: possibly truncate "number".
-      *number_out = number;
+      *number_out = static_cast<int>(number);
     }
     return true;
   }
@@ -893,10 +926,10 @@
 }
 
 // <identifier> ::= <unqualified source code identifier> (of given length)
-static bool ParseIdentifier(State *state, int length) {
+static bool ParseIdentifier(State *state, size_t length) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
-  if (length < 0 || !AtLeastNumCharsRemaining(RemainingInput(state), length)) {
+  if (!AtLeastNumCharsRemaining(RemainingInput(state), length)) {
     return false;
   }
   if (IdentifierIsAnonymousNamespace(state, length)) {
@@ -1613,6 +1646,7 @@
 //              ::= <2-ary operator-name> <expression> <expression>
 //              ::= <3-ary operator-name> <expression> <expression> <expression>
 //              ::= cl <expression>+ E
+//              ::= cp <simple-id> <expression>* E # Clang-specific.
 //              ::= cv <type> <expression>      # type (expression)
 //              ::= cv <type> _ <expression>* E # type (expr-list)
 //              ::= st <type>
@@ -1635,14 +1669,23 @@
     return true;
   }
 
-  // Object/function call expression.
   ParseState copy = state->parse_state;
+
+  // Object/function call expression.
   if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) &&
       ParseOneCharToken(state, 'E')) {
     return true;
   }
   state->parse_state = copy;
 
+  // Clang-specific "cp <simple-id> <expression>* E"
+  //   https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338
+  if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
+      ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
   // Function-param expression (level 0).
   if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
       Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
@@ -1933,7 +1976,7 @@
 }
 
 // The demangler entry point.
-bool Demangle(const char *mangled, char *out, int out_size) {
+bool Demangle(const char* mangled, char* out, size_t out_size) {
   State state;
   InitState(&state, mangled, out, out_size);
   return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
diff --git a/abseil-cpp/absl/debugging/internal/demangle.h b/abseil-cpp/absl/debugging/internal/demangle.h
index c314d9b..e1f1569 100644
--- a/abseil-cpp/absl/debugging/internal/demangle.h
+++ b/abseil-cpp/absl/debugging/internal/demangle.h
@@ -62,7 +62,7 @@
 // Demangle `mangled`.  On success, return true and write the
 // demangled symbol name to `out`.  Otherwise, return false.
 // `out` is modified even if demangling is unsuccessful.
-bool Demangle(const char *mangled, char *out, int out_size);
+bool Demangle(const char* mangled, char* out, size_t out_size);
 
 }  // namespace debugging_internal
 ABSL_NAMESPACE_END
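
With the change above, Demangle() takes the output-buffer size as size_t, and the relaxed clone-suffix and abi-tag handling in demangle.cc accepts names such as "_ZL3Foov.__uniq.12345". A short usage sketch; the mangled inputs and expected outputs come from the test cases added below:

    // Usage sketch of the updated Demangle() signature.
    #include <cstdio>
    #include "absl/debugging/internal/demangle.h"

    int main() {
      char out[80];
      if (absl::debugging_internal::Demangle("_ZL3Foov.__uniq.12345", out,
                                             sizeof(out))) {
        std::printf("%s\n", out);  // Prints "Foo()".
      }
      if (absl::debugging_internal::Demangle("_Z1CB3barB3foov", out,
                                             sizeof(out))) {
        std::printf("%s\n", out);  // Prints "C[abi:bar][abi:foo]()".
      }
      return 0;
    }
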
diff --git a/abseil-cpp/absl/debugging/internal/demangle_test.cc b/abseil-cpp/absl/debugging/internal/demangle_test.cc
index 0bed735..faec72b 100644
--- a/abseil-cpp/absl/debugging/internal/demangle_test.cc
+++ b/abseil-cpp/absl/debugging/internal/demangle_test.cc
@@ -19,8 +19,8 @@
 
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/debugging/internal/stack_consumption.h"
+#include "absl/log/log.h"
 #include "absl/memory/memory.h"
 
 namespace absl {
@@ -38,7 +38,7 @@
   }
 }
 
-// Test corner cases of bounary conditions.
+// Test corner cases of boundary conditions.
 TEST(Demangle, CornerCases) {
   char tmp[10];
   EXPECT_TRUE(Demangle("_Z6foobarv", tmp, sizeof(tmp)));
@@ -70,16 +70,62 @@
   EXPECT_STREQ("Foo()", tmp);
   EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
   EXPECT_STREQ("Foo()", tmp);
-  // Invalid (truncated), should not demangle.
-  EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
+  // Demangle suffixes produced by -funique-internal-linkage-names.
+  EXPECT_TRUE(Demangle("_ZL3Foov.__uniq.12345", tmp, sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  EXPECT_TRUE(Demangle("_ZL3Foov.__uniq.12345.isra.2.constprop.18", tmp,
+                       sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  // Suffixes without the number should also demangle.
+  EXPECT_TRUE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  // Suffixes with just the number should also demangle.
+  EXPECT_TRUE(Demangle("_ZL3Foov.123", tmp, sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  // (.clone. followed by non-number), should also demangle.
+  EXPECT_TRUE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  // (.clone. followed by multiple numbers), should also demangle.
+  EXPECT_TRUE(Demangle("_ZL3Foov.clone.123.456", tmp, sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  // (a long valid suffix), should demangle.
+  EXPECT_TRUE(Demangle("_ZL3Foov.part.9.165493.constprop.775.31805", tmp,
+                       sizeof(tmp)));
+  EXPECT_STREQ("Foo()", tmp);
+  // Invalid (. without anything else), should not demangle.
+  EXPECT_FALSE(Demangle("_ZL3Foov.", tmp, sizeof(tmp)));
+  // Invalid (. with mix of alpha and digits), should not demangle.
+  EXPECT_FALSE(Demangle("_ZL3Foov.abc123", tmp, sizeof(tmp)));
   // Invalid (.clone. not followed by number), should not demangle.
   EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
-  // Invalid (.clone. followed by non-number), should not demangle.
-  EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
   // Invalid (.constprop. not followed by number), should not demangle.
   EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
 }
 
+// Test the GNU abi_tag extension.
+TEST(Demangle, AbiTags) {
+  char tmp[80];
+
+  // Mangled name generated via:
+  // struct [[gnu::abi_tag("abc")]] A{};
+  // A a;
+  EXPECT_TRUE(Demangle("_Z1aB3abc", tmp, sizeof(tmp)));
+  EXPECT_STREQ("a[abi:abc]", tmp);
+
+  // Mangled name generated via:
+  // struct B {
+  //   B [[gnu::abi_tag("xyz")]] (){};
+  // };
+  // B b;
+  EXPECT_TRUE(Demangle("_ZN1BC2B3xyzEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("B::B[abi:xyz]()", tmp);
+
+  // Mangled name generated via:
+  // [[gnu::abi_tag("foo", "bar")]] void C() {}
+  EXPECT_TRUE(Demangle("_Z1CB3barB3foov", tmp, sizeof(tmp)));
+  EXPECT_STREQ("C[abi:bar][abi:foo]()", tmp);
+}
+
 // Tests that verify that Demangle footprint is within some limit.
 // They are not to be run under sanitizers as the sanitizers increase
 // stack consumption by about 4x.
@@ -105,7 +151,7 @@
                                             int *stack_consumed) {
   g_mangled = mangled;
   *stack_consumed = GetSignalHandlerStackConsumption(DemangleSignalHandler);
-  ABSL_RAW_LOG(INFO, "Stack consumption of Demangle: %d", *stack_consumed);
+  LOG(INFO) << "Stack consumption of Demangle: " << *stack_consumed;
   return g_demangle_result;
 }
 
diff --git a/abseil-cpp/absl/debugging/internal/elf_mem_image.cc b/abseil-cpp/absl/debugging/internal/elf_mem_image.cc
index 24cc013..42dcd3c 100644
--- a/abseil-cpp/absl/debugging/internal/elf_mem_image.cc
+++ b/abseil-cpp/absl/debugging/internal/elf_mem_image.cc
@@ -22,6 +22,7 @@
 #include <string.h>
 #include <cassert>
 #include <cstddef>
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 
 // From binutils/include/elf/common.h (this doesn't appear to be documented
@@ -43,11 +44,11 @@
 
 namespace {
 
-#if __WORDSIZE == 32
+#if __SIZEOF_POINTER__ == 4
 const int kElfClass = ELFCLASS32;
 int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
 int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
-#elif __WORDSIZE == 64
+#elif __SIZEOF_POINTER__ == 8
 const int kElfClass = ELFCLASS64;
 int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
 int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
@@ -90,7 +91,7 @@
     return 0;
   }
   // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
-  return hash_[1];
+  return static_cast<int>(hash_[1]);
 }
 
 const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
@@ -104,11 +105,9 @@
 }
 
 const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
-  ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range");
-  return GetTableElement<ElfW(Phdr)>(ehdr_,
-                                     ehdr_->e_phoff,
-                                     ehdr_->e_phentsize,
-                                     index);
+  ABSL_RAW_CHECK(index >= 0 && index < ehdr_->e_phnum, "index out of range");
+  return GetTableElement<ElfW(Phdr)>(ehdr_, ehdr_->e_phoff, ehdr_->e_phentsize,
+                                     static_cast<size_t>(index));
 }
 
 const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
@@ -158,7 +157,8 @@
   hash_      = nullptr;
   strsize_   = 0;
   verdefnum_ = 0;
-  link_base_ = ~0L;  // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+  // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+  link_base_ = ~ElfW(Addr){0};  // NOLINT(readability/braces)
   if (!base) {
     return;
   }
@@ -175,17 +175,17 @@
   }
   switch (base_as_char[EI_DATA]) {
     case ELFDATA2LSB: {
-      if (__LITTLE_ENDIAN != __BYTE_ORDER) {
-        assert(false);
-        return;
-      }
+#ifndef ABSL_IS_LITTLE_ENDIAN
+      assert(false);
+      return;
+#endif
       break;
     }
     case ELFDATA2MSB: {
-      if (__BIG_ENDIAN != __BYTE_ORDER) {
-        assert(false);
-        return;
-      }
+#ifndef ABSL_IS_BIG_ENDIAN
+      assert(false);
+      return;
+#endif
       break;
     }
     default: {
@@ -217,11 +217,11 @@
   }
   ptrdiff_t relocation =
       base_as_char - reinterpret_cast<const char *>(link_base_);
-  ElfW(Dyn) *dynamic_entry =
-      reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
-                                    relocation);
+  ElfW(Dyn)* dynamic_entry = reinterpret_cast<ElfW(Dyn)*>(
+      static_cast<intptr_t>(dynamic_program_header->p_vaddr) + relocation);
   for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
-    const ElfW(Xword) value = dynamic_entry->d_un.d_val + relocation;
+    const auto value =
+        static_cast<intptr_t>(dynamic_entry->d_un.d_val) + relocation;
     switch (dynamic_entry->d_tag) {
       case DT_HASH:
         hash_ = reinterpret_cast<ElfW(Word) *>(value);
@@ -239,10 +239,10 @@
         verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
         break;
       case DT_VERDEFNUM:
-        verdefnum_ = dynamic_entry->d_un.d_val;
+        verdefnum_ = static_cast<size_t>(dynamic_entry->d_un.d_val);
         break;
       case DT_STRSZ:
-        strsize_ = dynamic_entry->d_un.d_val;
+        strsize_ = static_cast<size_t>(dynamic_entry->d_un.d_val);
         break;
       default:
         // Unrecognized entries explicitly ignored.
@@ -350,7 +350,11 @@
   const ElfW(Versym) *version_symbol = image->GetVersym(index_);
   ABSL_RAW_CHECK(symbol && version_symbol, "");
   const char *const symbol_name = image->GetDynstr(symbol->st_name);
+#if defined(__NetBSD__)
+  const int version_index = version_symbol->vs_vers & VERSYM_VERSION;
+#else
   const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+#endif
   const ElfW(Verdef) *version_definition = nullptr;
   const char *version_name = "";
   if (symbol->st_shndx == SHN_UNDEF) {
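
The hunk above swaps glibc-only __WORDSIZE and __BYTE_ORDER checks for __SIZEOF_POINTER__ and absl's ABSL_IS_LITTLE_ENDIAN / ABSL_IS_BIG_ENDIAN, so the ELF-class and endianness selection also works on non-glibc ELF platforms. A tiny sketch of those compile-time checks (not from the patch):

    // absl/base/config.h defines exactly one of the two endianness macros,
    // and __SIZEOF_POINTER__ is provided by the compiler.
    #include <cstdio>
    #include "absl/base/config.h"

    int main() {
    #if __SIZEOF_POINTER__ == 8
      std::printf("64-bit pointers, ");
    #else
      std::printf("32-bit pointers, ");
    #endif
    #if defined(ABSL_IS_LITTLE_ENDIAN)
      std::puts("little-endian build");
    #else
      std::puts("big-endian build");
    #endif
      return 0;
    }
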
diff --git a/abseil-cpp/absl/debugging/internal/elf_mem_image.h b/abseil-cpp/absl/debugging/internal/elf_mem_image.h
index 46bfade..e7fe6ab 100644
--- a/abseil-cpp/absl/debugging/internal/elf_mem_image.h
+++ b/abseil-cpp/absl/debugging/internal/elf_mem_image.h
@@ -31,8 +31,10 @@
 #error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
 #endif
 
-#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
-    !defined(__asmjs__) && !defined(__wasm__)
+#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
+    !defined(__native_client__) && !defined(__asmjs__) &&             \
+    !defined(__wasm__) && !defined(__HAIKU__) && !defined(__sun) &&   \
+    !defined(__VXWORKS__) && !defined(__hexagon__)
 #define ABSL_HAVE_ELF_MEM_IMAGE 1
 #endif
 
@@ -40,6 +42,10 @@
 
 #include <link.h>  // for ElfW
 
+#if defined(__FreeBSD__) && !defined(ElfW)
+#define ElfW(x) __ElfN(x)
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
diff --git a/abseil-cpp/absl/debugging/internal/examine_stack.cc b/abseil-cpp/absl/debugging/internal/examine_stack.cc
index 6e5ff1f..3dd6ba1 100644
--- a/abseil-cpp/absl/debugging/internal/examine_stack.cc
+++ b/abseil-cpp/absl/debugging/internal/examine_stack.cc
@@ -20,7 +20,16 @@
 #include <unistd.h>
 #endif
 
-#ifdef __APPLE__
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_MMAP
+#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
+
+#if defined(__linux__) || defined(__APPLE__)
 #include <sys/ucontext.h>
 #endif
 
@@ -37,35 +46,158 @@
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
 
+namespace {
+constexpr int kDefaultDumpStackFramesLimit = 64;
+// The %p field width for printf() functions is two characters per byte,
+// and two extra for the leading "0x".
+constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
+
+ABSL_CONST_INIT SymbolizeUrlEmitter debug_stack_trace_hook = nullptr;
+
+// Async-signal safe mmap allocator.
+void* Allocate(size_t num_bytes) {
+#ifdef ABSL_HAVE_MMAP
+  void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  return p == MAP_FAILED ? nullptr : p;
+#else
+  (void)num_bytes;
+  return nullptr;
+#endif  // ABSL_HAVE_MMAP
+}
+
+void Deallocate(void* p, size_t size) {
+#ifdef ABSL_HAVE_MMAP
+  ::munmap(p, size);
+#else
+  (void)p;
+  (void)size;
+#endif  // ABSL_HAVE_MMAP
+}
+
+// Print a program counter only.
+void DumpPC(OutputWriter* writer, void* writer_arg, void* const pc,
+            const char* const prefix) {
+  char buf[100];
+  snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth, pc);
+  writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding stack frame size.
+void DumpPCAndFrameSize(OutputWriter* writer, void* writer_arg, void* const pc,
+                        int framesize, const char* const prefix) {
+  char buf[100];
+  if (framesize <= 0) {
+    snprintf(buf, sizeof(buf), "%s@ %*p  (unknown)\n", prefix,
+             kPrintfPointerFieldWidth, pc);
+  } else {
+    snprintf(buf, sizeof(buf), "%s@ %*p  %9d\n", prefix,
+             kPrintfPointerFieldWidth, pc, framesize);
+  }
+  writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding symbol.
+void DumpPCAndSymbol(OutputWriter* writer, void* writer_arg, void* const pc,
+                     const char* const prefix) {
+  char tmp[1024];
+  const char* symbol = "(unknown)";
+  // Symbolizes the previous address of pc because pc may be in the
+  // next function.  The overrun happens when the function ends with
+  // a call to a function annotated noreturn (e.g. CHECK).
+  // If symbolization of pc-1 fails, also try pc on the off-chance
+  // that we crashed on the first instruction of a function (that
+  // actually happens very often for e.g. __restore_rt).
+  const uintptr_t prev_pc = reinterpret_cast<uintptr_t>(pc) - 1;
+  if (absl::Symbolize(reinterpret_cast<const char*>(prev_pc), tmp,
+                      sizeof(tmp)) ||
+      absl::Symbolize(pc, tmp, sizeof(tmp))) {
+    symbol = tmp;
+  }
+  char buf[1024];
+  snprintf(buf, sizeof(buf), "%s@ %*p  %s\n", prefix, kPrintfPointerFieldWidth,
+           pc, symbol);
+  writer(buf, writer_arg);
+}
+
+// Print a program counter, its stack frame size, and its symbol name.
+// Note that there is a separate symbolize_pc argument. Return addresses may be
+// at the end of the function, and this allows the caller to back up from pc if
+// appropriate.
+void DumpPCAndFrameSizeAndSymbol(OutputWriter* writer, void* writer_arg,
+                                 void* const pc, void* const symbolize_pc,
+                                 int framesize, const char* const prefix) {
+  char tmp[1024];
+  const char* symbol = "(unknown)";
+  if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
+    symbol = tmp;
+  }
+  char buf[1024];
+  if (framesize <= 0) {
+    snprintf(buf, sizeof(buf), "%s@ %*p  (unknown)  %s\n", prefix,
+             kPrintfPointerFieldWidth, pc, symbol);
+  } else {
+    snprintf(buf, sizeof(buf), "%s@ %*p  %9d  %s\n", prefix,
+             kPrintfPointerFieldWidth, pc, framesize, symbol);
+  }
+  writer(buf, writer_arg);
+}
+
+}  // namespace
+
+void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook) {
+  debug_stack_trace_hook = hook;
+}
+
+SymbolizeUrlEmitter GetDebugStackTraceHook() { return debug_stack_trace_hook; }
+
 // Returns the program counter from signal context, nullptr if
 // unknown. vuc is a ucontext_t*. We use void* to avoid the use of
 // ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc) {
+void* GetProgramCounter(void* const vuc) {
 #ifdef __linux__
   if (vuc != nullptr) {
     ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
 #if defined(__aarch64__)
     return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__alpha__)
+    return reinterpret_cast<void*>(context->uc_mcontext.sc_pc);
 #elif defined(__arm__)
     return reinterpret_cast<void*>(context->uc_mcontext.arm_pc);
+#elif defined(__hppa__)
+    return reinterpret_cast<void*>(context->uc_mcontext.sc_iaoq[0]);
 #elif defined(__i386__)
     if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
       return reinterpret_cast<void*>(context->uc_mcontext.gregs[14]);
+#elif defined(__ia64__)
+    return reinterpret_cast<void*>(context->uc_mcontext.sc_ip);
+#elif defined(__m68k__)
+    return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
 #elif defined(__mips__)
     return reinterpret_cast<void*>(context->uc_mcontext.pc);
 #elif defined(__powerpc64__)
     return reinterpret_cast<void*>(context->uc_mcontext.gp_regs[32]);
 #elif defined(__powerpc__)
-    return reinterpret_cast<void*>(context->uc_mcontext.regs->nip);
+    return reinterpret_cast<void*>(context->uc_mcontext.uc_regs->gregs[32]);
 #elif defined(__riscv)
     return reinterpret_cast<void*>(context->uc_mcontext.__gregs[REG_PC]);
 #elif defined(__s390__) && !defined(__s390x__)
     return reinterpret_cast<void*>(context->uc_mcontext.psw.addr & 0x7fffffff);
 #elif defined(__s390__) && defined(__s390x__)
     return reinterpret_cast<void*>(context->uc_mcontext.psw.addr);
+#elif defined(__sh__)
+    return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__sparc__) && !defined(__arch64__)
+    return reinterpret_cast<void*>(context->uc_mcontext.gregs[19]);
+#elif defined(__sparc__) && defined(__arch64__)
+    return reinterpret_cast<void*>(context->uc_mcontext.mc_gregs[19]);
 #elif defined(__x86_64__)
     if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
       return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
+#elif defined(__e2k__)
+    return reinterpret_cast<void*>(context->uc_mcontext.cr0_hi);
+#elif defined(__loongarch__)
+    return reinterpret_cast<void*>(context->uc_mcontext.__pc);
 #else
 #error "Undefined Architecture."
 #endif
@@ -104,59 +236,17 @@
   return nullptr;
 }
 
-// The %p field width for printf() functions is two characters per byte,
-// and two extra for the leading "0x".
-static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
-
-// Print a program counter, its stack frame size, and its symbol name.
-// Note that there is a separate symbolize_pc argument. Return addresses may be
-// at the end of the function, and this allows the caller to back up from pc if
-// appropriate.
-static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
-                                        void* writerfn_arg, void* pc,
-                                        void* symbolize_pc, int framesize,
-                                        const char* const prefix) {
-  char tmp[1024];
-  const char* symbol = "(unknown)";
-  if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
-    symbol = tmp;
-  }
-  char buf[1024];
-  if (framesize <= 0) {
-    snprintf(buf, sizeof(buf), "%s@ %*p  (unknown)  %s\n", prefix,
-             kPrintfPointerFieldWidth, pc, symbol);
-  } else {
-    snprintf(buf, sizeof(buf), "%s@ %*p  %9d  %s\n", prefix,
-             kPrintfPointerFieldWidth, pc, framesize, symbol);
-  }
-  writerfn(buf, writerfn_arg);
-}
-
-// Print a program counter and the corresponding stack frame size.
-static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
-                               void* writerfn_arg, void* pc, int framesize,
-                               const char* const prefix) {
-  char buf[100];
-  if (framesize <= 0) {
-    snprintf(buf, sizeof(buf), "%s@ %*p  (unknown)\n", prefix,
-             kPrintfPointerFieldWidth, pc);
-  } else {
-    snprintf(buf, sizeof(buf), "%s@ %*p  %9d\n", prefix,
-             kPrintfPointerFieldWidth, pc, framesize);
-  }
-  writerfn(buf, writerfn_arg);
-}
-
-void DumpPCAndFrameSizesAndStackTrace(
-    void* pc, void* const stack[], int frame_sizes[], int depth,
-    int min_dropped_frames, bool symbolize_stacktrace,
-    void (*writerfn)(const char*, void*), void* writerfn_arg) {
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+                                      int frame_sizes[], int depth,
+                                      int min_dropped_frames,
+                                      bool symbolize_stacktrace,
+                                      OutputWriter* writer, void* writer_arg) {
   if (pc != nullptr) {
     // We don't know the stack frame size for PC, use 0.
     if (symbolize_stacktrace) {
-      DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
+      DumpPCAndFrameSizeAndSymbol(writer, writer_arg, pc, pc, 0, "PC: ");
     } else {
-      DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
+      DumpPCAndFrameSize(writer, writer_arg, pc, 0, "PC: ");
     }
   }
   for (int i = 0; i < depth; i++) {
@@ -166,22 +256,65 @@
       // call to a function annotated noreturn (e.g. CHECK). Note that we don't
       // do this for pc above, as the adjustment is only correct for return
       // addresses.
-      DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
+      DumpPCAndFrameSizeAndSymbol(writer, writer_arg, stack[i],
                                   reinterpret_cast<char*>(stack[i]) - 1,
                                   frame_sizes[i], "    ");
     } else {
-      DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
-                         "    ");
+      DumpPCAndFrameSize(writer, writer_arg, stack[i], frame_sizes[i], "    ");
     }
   }
   if (min_dropped_frames > 0) {
     char buf[100];
     snprintf(buf, sizeof(buf), "    @ ... and at least %d more frames\n",
              min_dropped_frames);
-    writerfn(buf, writerfn_arg);
+    writer(buf, writer_arg);
   }
 }
 
+// Dump current stack trace as directed by writer.
+// Make sure this function is not inlined to avoid skipping too many top frames.
+ABSL_ATTRIBUTE_NOINLINE
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+                    bool symbolize_stacktrace, OutputWriter* writer,
+                    void* writer_arg) {
+  // Print stack trace
+  void* stack_buf[kDefaultDumpStackFramesLimit];
+  void** stack = stack_buf;
+  int num_stack = kDefaultDumpStackFramesLimit;
+  size_t allocated_bytes = 0;
+
+  if (num_stack >= max_num_frames) {
+    // User requested fewer frames than we already have space for.
+    num_stack = max_num_frames;
+  } else {
+    const size_t needed_bytes =
+        static_cast<size_t>(max_num_frames) * sizeof(stack[0]);
+    void* p = Allocate(needed_bytes);
+    if (p != nullptr) {  // We got the space.
+      num_stack = max_num_frames;
+      stack = reinterpret_cast<void**>(p);
+      allocated_bytes = needed_bytes;
+    }
+  }
+
+  int depth = absl::GetStackTrace(stack, num_stack, min_dropped_frames + 1);
+  for (int i = 0; i < depth; i++) {
+    if (symbolize_stacktrace) {
+      DumpPCAndSymbol(writer, writer_arg, stack[static_cast<size_t>(i)],
+                      "    ");
+    } else {
+      DumpPC(writer, writer_arg, stack[static_cast<size_t>(i)], "    ");
+    }
+  }
+
+  auto hook = GetDebugStackTraceHook();
+  if (hook != nullptr) {
+    (*hook)(stack, depth, writer, writer_arg);
+  }
+
+  if (allocated_bytes != 0) Deallocate(stack, allocated_bytes);
+}
+
 }  // namespace debugging_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
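
DumpStackTrace() above deliberately avoids malloc: when the caller wants more frames than the fixed on-stack buffer holds, it grabs the extra space with an anonymous mmap, keeping the whole path async-signal-safe. A standalone sketch of that allocation pattern (SignalSafeAlloc/SignalSafeFree are illustrative names, not absl API):

    // mmap/munmap-based allocation usable from a signal handler, mirroring the
    // Allocate()/Deallocate() helpers added to examine_stack.cc.
    #include <sys/mman.h>
    #include <cstddef>

    void* SignalSafeAlloc(std::size_t bytes) {
      void* p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p == MAP_FAILED ? nullptr : p;
    }

    void SignalSafeFree(void* p, std::size_t bytes) {
      if (p != nullptr) munmap(p, bytes);
    }
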
diff --git a/abseil-cpp/absl/debugging/internal/examine_stack.h b/abseil-cpp/absl/debugging/internal/examine_stack.h
index 3933691..190af87 100644
--- a/abseil-cpp/absl/debugging/internal/examine_stack.h
+++ b/abseil-cpp/absl/debugging/internal/examine_stack.h
@@ -23,17 +23,39 @@
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
 
+// Type of function used for printing in stack trace dumping, etc.
+// We avoid closures to keep things simple.
+typedef void OutputWriter(const char*, void*);
+
+// RegisterDebugStackTraceHook() allows to register a single routine
+// `hook` that is called each time DumpStackTrace() is called.
+// `hook` may be called from a signal handler.
+typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth,
+                                    OutputWriter* writer, void* writer_arg);
+
+// Registration of SymbolizeUrlEmitter for use inside of a signal handler.
+// This is inherently unsafe and must be signal safe code.
+void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook);
+SymbolizeUrlEmitter GetDebugStackTraceHook();
+
 // Returns the program counter from signal context, or nullptr if
 // unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
 // ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc);
+void* GetProgramCounter(void* const vuc);
 
-// Uses `writerfn` to dump the program counter, stack trace, and stack
+// Uses `writer` to dump the program counter, stack trace, and stack
 // frame sizes.
-void DumpPCAndFrameSizesAndStackTrace(
-    void* pc, void* const stack[], int frame_sizes[], int depth,
-    int min_dropped_frames, bool symbolize_stacktrace,
-    void (*writerfn)(const char*, void*), void* writerfn_arg);
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+                                      int frame_sizes[], int depth,
+                                      int min_dropped_frames,
+                                      bool symbolize_stacktrace,
+                                      OutputWriter* writer, void* writer_arg);
+
+// Dump current stack trace omitting the topmost `min_dropped_frames` stack
+// frames.
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+                    bool symbolize_stacktrace, OutputWriter* writer,
+                    void* writer_arg);
 
 }  // namespace debugging_internal
 ABSL_NAMESPACE_END
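
The header additions above expose a hook that can append extra output (e.g. a symbolization URL) to every dumped stack, plus a DumpStackTrace() entry point. A hedged sketch of wiring them together; only the declarations come from examine_stack.h, the writer and hook bodies are made up for the example:

    #include <cstdio>
    #include "absl/debugging/internal/examine_stack.h"

    namespace di = absl::debugging_internal;

    // OutputWriter: append the text to stderr.
    static void StderrWriter(const char* text, void* /*arg*/) {
      std::fputs(text, stderr);
    }

    // SymbolizeUrlEmitter: called after the frames are printed.
    static void EmitFrameCount(void* const /*stack*/[], int depth,
                               di::OutputWriter* writer, void* writer_arg) {
      char buf[64];
      std::snprintf(buf, sizeof(buf), "    (hook saw %d frames)\n", depth);
      writer(buf, writer_arg);
    }

    void DumpCurrentStack() {
      di::RegisterDebugStackTraceHook(EmitFrameCount);
      di::DumpStackTrace(/*min_dropped_frames=*/0, /*max_num_frames=*/32,
                         /*symbolize_stacktrace=*/true, StderrWriter, nullptr);
    }
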
diff --git a/abseil-cpp/absl/debugging/internal/stack_consumption.cc b/abseil-cpp/absl/debugging/internal/stack_consumption.cc
index e3dd51c..3f40bea 100644
--- a/abseil-cpp/absl/debugging/internal/stack_consumption.cc
+++ b/abseil-cpp/absl/debugging/internal/stack_consumption.cc
@@ -18,14 +18,17 @@
 #ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
 
 #include <signal.h>
+#include <string.h>
 #include <sys/mman.h>
 #include <unistd.h>
 
-#include <string.h>
-
 #include "absl/base/attributes.h"
 #include "absl/base/internal/raw_logging.h"
 
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
@@ -43,7 +46,7 @@
 // unspecified. Therefore, instead we hardcode the direction of the
 // stack on platforms we know about.
 #if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
-    defined(__aarch64__)
+    defined(__aarch64__) || defined(__riscv)
 constexpr bool kStackGrowsDown = true;
 #else
 #error Need to define kStackGrowsDown
@@ -162,7 +165,7 @@
     // versions of musl have a bug that rejects ss_size==0. Work around this by
     // setting ss_size to MINSIGSTKSZ, which should be ignored by the kernel
     // when SS_DISABLE is set.
-    old_sigstk.ss_size = MINSIGSTKSZ;
+    old_sigstk.ss_size = static_cast<size_t>(MINSIGSTKSZ);
   }
   ABSL_RAW_CHECK(sigaltstack(&old_sigstk, nullptr) == 0,
                  "sigaltstack() failed");
diff --git a/abseil-cpp/absl/debugging/internal/stack_consumption.h b/abseil-cpp/absl/debugging/internal/stack_consumption.h
index 2b5e715..f41b64c 100644
--- a/abseil-cpp/absl/debugging/internal/stack_consumption.h
+++ b/abseil-cpp/absl/debugging/internal/stack_consumption.h
@@ -26,7 +26,7 @@
 #error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly
 #elif !defined(__APPLE__) && !defined(_WIN32) &&                     \
     (defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
-     defined(__aarch64__))
+     defined(__aarch64__) || defined(__riscv))
 #define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1
 
 namespace absl {
diff --git a/abseil-cpp/absl/debugging/internal/stack_consumption_test.cc b/abseil-cpp/absl/debugging/internal/stack_consumption_test.cc
index 80445bf..0255ac8 100644
--- a/abseil-cpp/absl/debugging/internal/stack_consumption_test.cc
+++ b/abseil-cpp/absl/debugging/internal/stack_consumption_test.cc
@@ -20,7 +20,7 @@
 #include <string.h>
 
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -33,7 +33,7 @@
 
   // Never true, but prevents compiler from optimizing buf out.
   if (signo == 0) {
-    ABSL_RAW_LOG(INFO, "%p", static_cast<void*>(buf));
+    LOG(INFO) << static_cast<void*>(buf);
   }
 }
 
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
index 14a76f1..3f08716 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -13,13 +13,18 @@
 #include <cassert>
 #include <cstdint>
 #include <iostream>
+#include <limits>
 
 #include "absl/base/attributes.h"
 #include "absl/debugging/internal/address_is_readable.h"
 #include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
 #include "absl/debugging/stacktrace.h"
 
-static const uintptr_t kUnknownFrameSize = 0;
+static const size_t kUnknownFrameSize = 0;
+// Stack end to use when we don't know the actual stack end
+// (effectively just the end of address space).
+constexpr uintptr_t kUnknownStackEnd =
+    std::numeric_limits<size_t>::max() - sizeof(void *);
 
 #if defined(__linux__)
 // Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
@@ -37,8 +42,11 @@
   absl::debugging_internal::VDSOSupport vdso;
   if (vdso.IsPresent()) {
     absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
-    if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC,
-                           &symbol_info) ||
+    auto lookup = [&](int type) {
+      return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
+                               &symbol_info);
+    };
+    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
         symbol_info.address == nullptr) {
       // Unexpected: VDSO is present, yet the expected symbol is missing
       // or null.
@@ -62,11 +70,12 @@
 // Compute the size of a stack frame in [low..high).  We assume that
 // low < high.  Return size of kUnknownFrameSize.
 template<typename T>
-static inline uintptr_t ComputeStackFrameSize(const T* low,
-                                              const T* high) {
+static inline size_t ComputeStackFrameSize(const T* low,
+                                           const T* high) {
   const char* low_char_ptr = reinterpret_cast<const char *>(low);
   const char* high_char_ptr = reinterpret_cast<const char *>(high);
-  return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+  return low < high ? static_cast<size_t>(high_char_ptr - low_char_ptr)
+                    : kUnknownFrameSize;
 }
 
 // Given a pointer to a stack frame, locate and return the calling
@@ -75,8 +84,9 @@
 // "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
 template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
 ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
-static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY  // May read random elements from stack.
+static void **NextStackFrame(void **old_frame_pointer, const void *uc,
+                             size_t stack_low, size_t stack_high) {
   void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
   bool check_frame_size = true;
 
@@ -90,16 +100,21 @@
       void **const pre_signal_frame_pointer =
           reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
 
+      // The most recent signal always needs special handling to find the frame
+      // pointer, but a nested signal does not.  If pre_signal_frame_pointer is
+      // earlier in the stack than the old_frame_pointer, then use it. If it is
+      // later, then we have already unwound through it and it needs no special
+      // handling.
+      if (pre_signal_frame_pointer >= old_frame_pointer) {
+        new_frame_pointer = pre_signal_frame_pointer;
+      }
       // Check that alleged frame pointer is actually readable. This is to
       // prevent "double fault" in case we hit the first fault due to e.g.
       // stack corruption.
       if (!absl::debugging_internal::AddressIsReadable(
-              pre_signal_frame_pointer))
+              new_frame_pointer))
         return nullptr;
 
-      // Alleged frame pointer is readable, use it for further unwinding.
-      new_frame_pointer = pre_signal_frame_pointer;
-
       // Skip frame size check if we return from a signal. We may be using a
       // an alternate stack for signals.
       check_frame_size = false;
@@ -107,18 +122,36 @@
   }
 #endif
 
-  // aarch64 ABI requires stack pointer to be 16-byte-aligned.
-  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
+  // The frame pointer should be 8-byte aligned.
+  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
     return nullptr;
 
   // Check frame size.  In strict mode, we assume frames to be under
   // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
   if (check_frame_size) {
-    const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
-    const uintptr_t frame_size =
+    const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+    const size_t frame_size =
         ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
-    if (frame_size == kUnknownFrameSize || frame_size > max_size)
-      return nullptr;
+    if (frame_size == kUnknownFrameSize)
+      return nullptr;
+    // A very large frame may mean corrupt memory or an erroneous frame
+    // pointer. But also maybe just a plain-old large frame.  Assume that if the
+    // frame is within the known stack, then it is valid.
+    if (frame_size > max_size) {
+      if (stack_high < kUnknownStackEnd &&
+          static_cast<size_t>(getpagesize()) < stack_low) {
+        const uintptr_t new_fp_u =
+            reinterpret_cast<uintptr_t>(new_frame_pointer);
+        // Stack bounds are known.
+        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+          // new_frame_pointer is not within the known stack.
+          return nullptr;
+        }
+      } else {
+        // Stack bounds are unknown, prefer truncated stack to possible crash.
+        return nullptr;
+      }
+    }
   }
 
   return new_frame_pointer;
@@ -134,51 +167,64 @@
 #else
 # error reading stack point not yet supported on this platform.
 #endif
-
   skip_count++;    // Skip the frame for this function.
   int n = 0;
 
+  // Assume that the first page is not stack.
+  size_t stack_low = static_cast<size_t>(getpagesize());
+  size_t stack_high = kUnknownStackEnd;
+
   // The frame pointer points to low address of a frame.  The first 64-bit
   // word of a frame points to the next frame up the call chain, which normally
   // is just after the high address of the current frame.  The second word of
-  // a frame contains return adress of to the caller.   To find a pc value
+  // a frame contains the return address of the caller.   To find a pc value
   // associated with the current frame, we need to go down a level in the call
   // chain.  So we remember return the address of the last frame seen.  This
   // does not work for the first stack frame, which belongs to UnwindImp() but
   // we skip the frame for UnwindImp() anyway.
   void* prev_return_address = nullptr;
+  // The nth frame size is the difference between the nth frame pointer and the
+  // frame pointer below it in the call chain. There is no frame below the
+  // leaf frame, but this function is the leaf anyway, and we skip it.
+  void** prev_frame_pointer = nullptr;
 
-  while (frame_pointer && n < max_depth) {
-    // The absl::GetStackFrames routine is called when we are in some
-    // informational context (the failure signal handler for example).
-    // Use the non-strict unwinding rules to produce a stack trace
-    // that is as complete as possible (even if it contains a few bogus
-    // entries in some rare cases).
-    void **next_frame_pointer =
-        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
-
+  while (frame_pointer && n < max_depth) {
     if (skip_count > 0) {
       skip_count--;
     } else {
       result[n] = prev_return_address;
       if (IS_STACK_FRAMES) {
-        sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+        sizes[n] = static_cast<int>(
+            ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
       }
       n++;
     }
     prev_return_address = frame_pointer[1];
-    frame_pointer = next_frame_pointer;
+    prev_frame_pointer = frame_pointer;
+    // The absl::GetStackFrames routine is called when we are in some
+    // informational context (the failure signal handler for example).
+    // Use the non-strict unwinding rules to produce a stack trace
+    // that is as complete as possible (even if it contains a few bogus
+    // entries in some rare cases).
+    frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+        frame_pointer, ucp, stack_low, stack_high);
   }
+
   if (min_dropped_frames != nullptr) {
     // Implementation detail: we clamp the max of frames we are willing to
     // count, so as not to spend too much time in the loop below.
     const int kMaxUnwind = 200;
-    int j = 0;
-    for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
-      frame_pointer =
-          NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+    int num_dropped_frames = 0;
+    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+      if (skip_count > 0) {
+        skip_count--;
+      } else {
+        num_dropped_frames++;
+      }
+      frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+          frame_pointer, ucp, stack_low, stack_high);
     }
-    *min_dropped_frames = j;
+    *min_dropped_frames = num_dropped_frames;
   }
   return n;
 }
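
The hunk above changes how oversized frames are treated: instead of rejecting any frame larger than the limit, the unwinder now keeps it when the candidate frame pointer still lies within known stack bounds. A standalone sketch of that heuristic, with hypothetical names:

#include <cstddef>
#include <cstdint>

constexpr size_t kMaxFrameSize = 100000;  // strict-mode limit used above

// Returns true if new_fp plausibly points at the next (older) frame.
bool FrameLooksValid(uintptr_t old_fp, uintptr_t new_fp,
                     size_t stack_low, size_t stack_high, bool bounds_known) {
  if (new_fp <= old_fp) return false;  // stack grows down, so we must move up
  const size_t frame_size = new_fp - old_fp;
  if (frame_size <= kMaxFrameSize) return true;
  // Oversized frame: accept it only when the stack bounds are known and the
  // candidate still falls inside them; otherwise prefer a truncated trace.
  return bounds_known && stack_low < new_fp && new_fp <= stack_high;
}
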
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc
index 2a1bf2e..102a2a1 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc
@@ -112,11 +112,16 @@
     // Implementation detail: we clamp the max of frames we are willing to
     // count, so as not to spend too much time in the loop below.
     const int kMaxUnwind = 200;
-    int j = 0;
-    for (; sp != nullptr && j < kMaxUnwind; j++) {
+    int num_dropped_frames = 0;
+    for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) {
+      if (skip_count > 0) {
+        skip_count--;
+      } else {
+        num_dropped_frames++;
+      }
       sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
     }
-    *min_dropped_frames = j;
+    *min_dropped_frames = num_dropped_frames;
   }
   return n;
 }
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_config.h b/abseil-cpp/absl/debugging/internal/stacktrace_config.h
index 90af852..3929b1b 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_config.h
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_config.h
@@ -21,6 +21,8 @@
 #ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
 #define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
 
+#include "absl/base/config.h"
+
 #if defined(ABSL_STACKTRACE_INL_HEADER)
 #error ABSL_STACKTRACE_INL_HEADER cannot be directly set
 
@@ -29,22 +31,16 @@
     "absl/debugging/internal/stacktrace_win32-inl.inc"
 
 #elif defined(__APPLE__)
+#ifdef ABSL_HAVE_THREAD_LOCAL
 // Thread local support required for UnwindImpl.
-// Notes:
-// * Xcode's clang did not support `thread_local` until version 8, and
-//   even then not for all iOS < 9.0.
-// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
-//   targeting iOS 9.x.
-// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
-//   making __has_feature unreliable there.
-//
-// Otherwise, `__has_feature` is only supported by Clang so it has be inside
-// `defined(__APPLE__)` check.
-#if __has_feature(cxx_thread_local) && \
-    !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif
+#endif  // defined(ABSL_HAVE_THREAD_LOCAL)
+
+// Emscripten stacktraces rely on JS. Do not use them in standalone mode.
+#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
+#define ABSL_STACKTRACE_INL_HEADER \
+  "absl/debugging/internal/stacktrace_emscripten-inl.inc"
 
 #elif defined(__linux__) && !defined(__ANDROID__)
 
@@ -60,7 +56,7 @@
 // Note: When using glibc this may require -funwind-tables to function properly.
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif
+#endif  // __has_include(<execinfo.h>)
 #elif defined(__i386__) || defined(__x86_64__)
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_x86-inl.inc"
@@ -70,15 +66,18 @@
 #elif defined(__aarch64__)
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_aarch64-inl.inc"
+#elif defined(__riscv)
+#define ABSL_STACKTRACE_INL_HEADER \
+  "absl/debugging/internal/stacktrace_riscv-inl.inc"
 #elif defined(__has_include)
 #if __has_include(<execinfo.h>)
 // Note: When using glibc this may require -funwind-tables to function properly.
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif
-#endif
+#endif  // __has_include(<execinfo.h>)
+#endif  // defined(__has_include)
 
-#endif
+#endif  // defined(__linux__) && !defined(__ANDROID__)
 
 // Fallback to the empty implementation.
 #if !defined(ABSL_STACKTRACE_INL_HEADER)
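
This header only selects a file name; a sketch of how that selection is consumed (simplified relative to the real stacktrace.cc): because the fallback branch above always defines ABSL_STACKTRACE_INL_HEADER, including it through the macro is safe on every platform.

#include "absl/debugging/internal/stacktrace_config.h"

// Pulls in exactly one UnwindImpl(), e.g. stacktrace_riscv-inl.inc on RISC-V
// or stacktrace_unimplemented-inl.inc where no unwinder exists.
#include ABSL_STACKTRACE_INL_HEADER
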
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc
new file mode 100644
index 0000000..0f44451
--- /dev/null
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc
@@ -0,0 +1,110 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Portable implementation - just use glibc
+//
+// Note:  The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
+
+#include <emscripten.h>
+
+#include <atomic>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/debugging/stacktrace.h"
+
+extern "C" {
+uintptr_t emscripten_stack_snapshot();
+uint32_t emscripten_stack_unwind_buffer(uintptr_t pc, void *buffer,
+                                        uint32_t depth);
+}
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, which can cause a self-deadlock.
+// Protect against such reentrant call by failing to get a stack trace.
+//
+// We use __thread here because the code here is extremely low level -- it is
+// called while collecting stack traces from within malloc and mmap, and thus
+// can not call anything which might call malloc or mmap itself.
+static __thread int recursive = 0;
+
+// The stack trace function might be invoked very early in the program's
+// execution (e.g. from the very first malloc).
+// As such, we suppress usage of backtrace during this early stage of execution.
+static std::atomic<bool> disable_stacktraces(true);  // Disabled until healthy.
+// Waiting until static initializers run seems to be late enough.
+// This file is included into stacktrace.cc so this will only run once.
+ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
+  // Check if we can even create stacktraces. If not, bail early and leave
+  // disable_stacktraces set as-is.
+  // clang-format off
+  if (!EM_ASM_INT({ return (typeof wasmOffsetConverter !== 'undefined'); })) {
+    return 0;
+  }
+  // clang-format on
+  disable_stacktraces.store(false, std::memory_order_relaxed);
+  return 0;
+}();
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
+    return 0;
+  }
+  ++recursive;
+
+  static_cast<void>(ucp);  // Unused.
+  constexpr int kStackLength = 64;
+  void *stack[kStackLength];
+
+  int size;
+  uintptr_t pc = emscripten_stack_snapshot();
+  size = emscripten_stack_unwind_buffer(pc, stack, kStackLength);
+
+  int result_count = size - skip_count;
+  if (result_count < 0) result_count = 0;
+  if (result_count > max_depth) result_count = max_depth;
+  for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count];
+
+  if (IS_STACK_FRAMES) {
+    // No implementation for finding out the stack frame sizes yet.
+    memset(sizes, 0, sizeof(*sizes) * result_count);
+  }
+  if (min_dropped_frames != nullptr) {
+    if (size - skip_count - max_depth > 0) {
+      *min_dropped_frames = size - skip_count - max_depth;
+    } else {
+      *min_dropped_frames = 0;
+    }
+  }
+
+  --recursive;
+
+  return result_count;
+}
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() { return true; }
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc
index b2792a1..5fa169a 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc
@@ -80,7 +80,7 @@
 
   if (IS_STACK_FRAMES) {
     // No implementation for finding out the stack frame sizes yet.
-    memset(sizes, 0, sizeof(*sizes) * result_count);
+    memset(sizes, 0, sizeof(*sizes) * static_cast<size_t>(result_count));
   }
   if (min_dropped_frames != nullptr) {
     if (size - skip_count - max_depth > 0) {
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
index 2e7c2f4..a49ed2f 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -57,7 +57,7 @@
   // This check is in case the compiler doesn't define _CALL_SYSV.
   return *(sp+1);
 #else
-#error Need to specify the PPC ABI for your archiecture.
+#error Need to specify the PPC ABI for your architecture.
 #endif
 }
 
@@ -131,7 +131,12 @@
       const ucontext_t* signal_context =
           reinterpret_cast<const ucontext_t*>(uc);
       void **const sp_before_signal =
-          reinterpret_cast<void**>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+#if defined(__PPC64__)
+          reinterpret_cast<void **>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+#else
+          reinterpret_cast<void **>(
+              signal_context->uc_mcontext.uc_regs->gregs[PT_R1]);
+#endif
       // Check that alleged sp before signal is nonnull and is reasonably
       // aligned.
       if (sp_before_signal != nullptr &&
@@ -226,11 +231,16 @@
     // Implementation detail: we clamp the max of frames we are willing to
     // count, so as not to spend too much time in the loop below.
     const int kMaxUnwind = 1000;
-    int j = 0;
-    for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+    int num_dropped_frames = 0;
+    for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) {
+      if (skip_count > 0) {
+        skip_count--;
+      } else {
+        num_dropped_frames++;
+      }
       next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
     }
-    *min_dropped_frames = j;
+    *min_dropped_frames = num_dropped_frames;
   }
   return n;
 }
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc
new file mode 100644
index 0000000..20183fa
--- /dev/null
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -0,0 +1,191 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
+
+// Generate stack trace for riscv
+
+#include <sys/ucontext.h>
+
+#include "absl/base/config.h"
+#if defined(__linux__)
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+#include <limits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/debugging/stacktrace.h"
+
+static const uintptr_t kUnknownFrameSize = 0;
+
+// Compute the size of a stack frame in [low..high).  We assume that low < high.
+// Returns kUnknownFrameSize if low >= high.
+template <typename T>
+static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) {
+  const char *low_char_ptr = reinterpret_cast<const char *>(low);
+  const char *high_char_ptr = reinterpret_cast<const char *>(high);
+  return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+}
+
+// Given a pointer to a stack frame, locate and return the calling stackframe,
+// or return null if no stackframe can be found. Perform sanity checks (the
+// strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static void ** NextStackFrame(void **old_frame_pointer, const void *uc,
+                              const std::pair<size_t, size_t> range) {
+  //               .
+  //               .
+  //               .
+  //   +-> +----------------+
+  //   |   | return address |
+  //   |   |   previous fp  |
+  //   |   |      ...       |
+  //   |   +----------------+ <-+
+  //   |   | return address |   |
+  //   +---|-  previous fp  |   |
+  //       |      ...       |   |
+  // $fp ->|----------------+   |
+  //       | return address |   |
+  //       |   previous fp -|---+
+  // $sp ->|      ...       |
+  //       +----------------+
+  void **new_frame_pointer = reinterpret_cast<void **>(old_frame_pointer[-2]);
+  uintptr_t frame_pointer = reinterpret_cast<uintptr_t>(new_frame_pointer);
+
+  // The RISCV ELF psABI mandates that the stack pointer is always 16-byte
+  // aligned.
+  // TODO(#1236) this doesn't hold for ILP32E which only mandates a 4-byte
+  // alignment.
+  if (frame_pointer & 15)
+    return nullptr;
+
+  // If the new frame pointer matches the signal context, avoid terminating
+  // early to deal with alternate signal stacks.
+  if (WITH_CONTEXT)
+    if (const ucontext_t *ucv = static_cast<const ucontext_t *>(uc))
+      // RISCV ELF psABI has the frame pointer at x8/fp/s0.
+      // -- RISCV psABI Table 18.2
+      if (ucv->uc_mcontext.__gregs[8] == frame_pointer)
+        return new_frame_pointer;
+
+  // Check frame size.  In strict mode, we assume frames to be under 100,000
+  // bytes.  In non-strict mode, we relax the limit to 1MB.
+  const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+  const uintptr_t frame_size =
+      ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+  if (frame_size == kUnknownFrameSize) {
+    if (STRICT_UNWINDING)
+      return nullptr;
+
+    // In non-strict mode permit non-contiguous stacks (e.g. alternate signal
+    // frame handling).
+    if (reinterpret_cast<uintptr_t>(new_frame_pointer) < range.first ||
+        reinterpret_cast<uintptr_t>(new_frame_pointer) > range.second)
+      return nullptr;
+  }
+
+  if (frame_size > max_size)
+    return nullptr;
+
+  return new_frame_pointer;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  // The `frame_pointer` that is computed here points to the top of the frame.
+  // The two words preceding the address are the return address and the previous
+  // frame pointer.
+#if defined(__GNUC__)
+  void **frame_pointer = reinterpret_cast<void **>(__builtin_frame_address(0));
+#else
+#error reading stack pointer not yet supported on this platform
+#endif
+
+  std::pair<size_t, size_t> stack = {
+      // assume that the first page is not the stack.
+      static_cast<size_t>(sysconf(_SC_PAGESIZE)),
+      std::numeric_limits<size_t>::max() - sizeof(void *)
+  };
+
+  int n = 0;
+  void *return_address = nullptr;
+  while (frame_pointer && n < max_depth) {
+    return_address = frame_pointer[-1];
+
+    // The absl::GetStackFrames routine is called when we are in some
+    // informational context (the failure signal handler for example).  Use the
+    // non-strict unwinding rules to produce a stack trace that is as complete
+    // as possible (even if it contains a few bogus entries in some rare cases).
+    void **next_frame_pointer =
+        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp,
+                                                          stack);
+
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = return_address;
+      if (IS_STACK_FRAMES) {
+        sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+      }
+      n++;
+    }
+
+    frame_pointer = next_frame_pointer;
+  }
+
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the max of frames we are willing to
+    // count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 200;
+    int num_dropped_frames = 0;
+    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+      if (skip_count > 0) {
+        skip_count--;
+      } else {
+        num_dropped_frames++;
+      }
+      frame_pointer =
+          NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp,
+                                                            stack);
+    }
+    *min_dropped_frames = num_dropped_frames;
+  }
+
+  return n;
+}
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() { return true; }
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
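
A tiny sketch of the frame-record layout drawn in the diagram above: with frame pointers enabled, fp[-1] holds the return address and fp[-2] the caller's frame pointer, so a naive (unchecked) walk reads two words per frame. Names are illustrative.

#include <cstdint>

struct FrameRecord {
  void* return_address;  // fp[-1]
  void** caller_fp;      // fp[-2]
};

inline FrameRecord ReadFrameRecord(void** fp) {
  return FrameRecord{fp[-1], reinterpret_cast<void**>(fp[-2])};
}

// Usage (requires -fno-omit-frame-pointer):
//   void** fp = reinterpret_cast<void**>(__builtin_frame_address(0));
//   while (fp != nullptr) {
//     FrameRecord r = ReadFrameRecord(fp);
//     ...  // record r.return_address
//     fp = r.caller_fp;
//   }
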
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc
index 1c666c8..ef2b973 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc
@@ -63,11 +63,12 @@
 template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
 static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                       const void*, int* min_dropped_frames) {
-  int n = 0;
-  if (!RtlCaptureStackBackTrace_fn) {
-    // can't find a stacktrace with no function to call
+  USHORT n = 0;
+  if (!RtlCaptureStackBackTrace_fn || skip_count < 0 || max_depth < 0) {
+    // can't get a stacktrace with no function/invalid args
   } else {
-    n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
+    n = RtlCaptureStackBackTrace_fn(static_cast<ULONG>(skip_count) + 2,
+                                    static_cast<ULONG>(max_depth), result, 0);
   }
   if (IS_STACK_FRAMES) {
     // No implementation for finding out the stack frame sizes yet.
diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
index bc320ff..1975ba7 100644
--- a/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -27,20 +27,20 @@
 
 #include <cassert>
 #include <cstdint>
+#include <limits>
 
+#include "absl/base/attributes.h"
 #include "absl/base/macros.h"
 #include "absl/base/port.h"
 #include "absl/debugging/internal/address_is_readable.h"
 #include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
 #include "absl/debugging/stacktrace.h"
 
-#include "absl/base/internal/raw_logging.h"
-
 using absl::debugging_internal::AddressIsReadable;
 
 #if defined(__linux__) && defined(__i386__)
 // Count "push %reg" instructions in VDSO __kernel_vsyscall(),
-// preceeding "syscall" or "sysenter".
+// preceding "syscall" or "sysenter".
 // If __kernel_vsyscall uses frame pointer, answer 0.
 //
 // kMaxBytes tells how many instruction bytes of __kernel_vsyscall
@@ -112,6 +112,10 @@
 
 // Assume stack frames larger than 100,000 bytes are bogus.
 static const int kMaxFrameBytes = 100000;
+// Stack end to use when we don't know the actual stack end
+// (effectively just the end of address space).
+constexpr uintptr_t kUnknownStackEnd =
+    std::numeric_limits<size_t>::max() - sizeof(void *);
 
 // Returns the stack frame pointer from signal context, 0 if unknown.
 // vuc is a ucontext_t *.  We use void* to avoid the use
@@ -132,21 +136,21 @@
     const uintptr_t bp = 0;
     const uintptr_t sp = 0;
 #endif
-    // Sanity-check that the base pointer is valid.  It should be as long as
-    // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code in
-    // the process is compiled with --copt=-fomit-frame-pointer or
+    // Sanity-check that the base pointer is valid. It's possible that some
+    // code in the process is compiled with --copt=-fomit-frame-pointer or
     // --copt=-momit-leaf-frame-pointer.
     //
     // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
     // behavior when building with clang.  Talk to the C++ toolchain team about
     // fixing that.
-    if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;
+    if (bp >= sp && bp - sp <= kMaxFrameBytes)
+      return static_cast<uintptr_t>(bp);
 
     // If bp isn't a plausible frame pointer, return the stack pointer instead.
     // If we're lucky, it points to the start of a stack frame; otherwise, we'll
     // get one frame of garbage in the stack trace and fail the sanity check on
     // the next iteration.
-    return sp;
+    return static_cast<uintptr_t>(sp);
   }
 #endif
   return 0;
@@ -159,7 +163,8 @@
 template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
 ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
 ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
-static void **NextStackFrame(void **old_fp, const void *uc) {
+static void **NextStackFrame(void **old_fp, const void *uc,
+                             size_t stack_low, size_t stack_high) {
   void **new_fp = (void **)*old_fp;
 
 #if defined(__linux__) && defined(__i386__)
@@ -247,7 +252,7 @@
   // using an alternate signal stack.
   //
   // TODO(bcmills): The GetFP call should be completely unnecessary when
-  // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
+  // ENABLE_COMBINED_UNWINDER is set (because we should be back in the thread's
   // stack by this point), but it is empirically still needed (e.g. when the
   // stack includes a call to abort).  unw_get_reg returns UNW_EBADREG for some
   // frames.  Figure out why GetValidFrameAddr and/or libunwind isn't doing what
@@ -257,7 +262,37 @@
     // With the stack growing downwards, older stack frame must be
     // at a greater address that the current one.
     if (new_fp_u <= old_fp_u) return nullptr;
-    if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
+
+    // If we get a very large frame size, it may be an indication that we
+    // guessed frame pointers incorrectly and now risk a paging fault
+    // dereferencing a wrong frame pointer. Or maybe not because large frames
+    // are possible as well. The main stack is assumed to be readable,
+    // so we assume the large frame is legit if we know the real stack bounds
+    // and are within the stack.
+    if (new_fp_u - old_fp_u > kMaxFrameBytes) {
+      if (stack_high < kUnknownStackEnd &&
+          static_cast<size_t>(getpagesize()) < stack_low) {
+        // Stack bounds are known.
+        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+          // new_fp_u is not within the known stack.
+          return nullptr;
+        }
+      } else {
+        // Stack bounds are unknown, prefer truncated stack to possible crash.
+        return nullptr;
+      }
+    }
+    if (stack_low < old_fp_u && old_fp_u <= stack_high) {
+      // Old BP was in the expected stack region...
+      if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+        // ... but new BP is outside of expected stack region.
+        // It is most likely bogus.
+        return nullptr;
+      }
+    } else {
+      // We may be here if we are executing in a co-routine with a
+      // separate stack. We can't do safety checks in this case.
+    }
   } else {
     if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
     // In the non-strict mode, allow discontiguous stack frames.
@@ -297,20 +332,27 @@
   int n = 0;
   void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
 
+  // Assume that the first page is not stack.
+  size_t stack_low = static_cast<size_t>(getpagesize());
+  size_t stack_high = kUnknownStackEnd;
+
   while (fp && n < max_depth) {
     if (*(fp + 1) == reinterpret_cast<void *>(0)) {
       // In 64-bit code, we often see a frame that
       // points to itself and has a return address of 0.
       break;
     }
-    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+        fp, ucp, stack_low, stack_high);
     if (skip_count > 0) {
       skip_count--;
     } else {
       result[n] = *(fp + 1);
       if (IS_STACK_FRAMES) {
         if (next_fp > fp) {
-          sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
+          sizes[n] = static_cast<int>(
+              reinterpret_cast<uintptr_t>(next_fp) -
+              reinterpret_cast<uintptr_t>(fp));
         } else {
           // A frame-size of 0 is used to indicate unknown frame size.
           sizes[n] = 0;
@@ -324,11 +366,17 @@
     // Implementation detail: we clamp the max of frames we are willing to
     // count, so as not to spend too much time in the loop below.
     const int kMaxUnwind = 1000;
-    int j = 0;
-    for (; fp != nullptr && j < kMaxUnwind; j++) {
-      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+    int num_dropped_frames = 0;
+    for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) {
+      if (skip_count > 0) {
+        skip_count--;
+      } else {
+        num_dropped_frames++;
+      }
+      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp, stack_low,
+                                                             stack_high);
     }
-    *min_dropped_frames = j;
+    *min_dropped_frames = num_dropped_frames;
   }
   return n;
 }
diff --git a/abseil-cpp/absl/debugging/internal/symbolize.h b/abseil-cpp/absl/debugging/internal/symbolize.h
index b3729af..5593fde 100644
--- a/abseil-cpp/absl/debugging/internal/symbolize.h
+++ b/abseil-cpp/absl/debugging/internal/symbolize.h
@@ -28,8 +28,8 @@
 
 #ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
 #error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
-#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
-    !defined(__asmjs__) && !defined(__wasm__)
+#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \
+      && !defined(__asmjs__) && !defined(__wasm__)
 #define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1
 
 #include <elf.h>
@@ -68,6 +68,12 @@
 #define ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE 1
 #endif
 
+#ifdef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
+#error ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE cannot be directly set
+#elif defined(__EMSCRIPTEN__)
+#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
@@ -109,7 +115,7 @@
 
 // Remove all installed decorators.  Returns true if successful, false if
 // symbolization is currently in progress.
-bool RemoveAllSymbolDecorators(void);
+bool RemoveAllSymbolDecorators();
 
 // Registers an address range to a file mapping.
 //
@@ -118,16 +124,14 @@
 //   filename != nullptr
 //
 // Returns true if the file was successfully registered.
-bool RegisterFileMappingHint(
-    const void* start, const void* end, uint64_t offset, const char* filename);
+bool RegisterFileMappingHint(const void* start, const void* end,
+                             uint64_t offset, const char* filename);
 
 // Looks up the file mapping registered by RegisterFileMappingHint for an
 // address range. If there is one, the file name is stored in *filename and
 // *start and *end are modified to reflect the registered mapping. Returns
 // whether any hint was found.
-bool GetFileMappingHint(const void** start,
-                        const void** end,
-                        uint64_t    *  offset,
+bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset,
                         const char** filename);
 
 }  // namespace debugging_internal
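
A hedged usage sketch of the file-mapping-hint API declared above; the wrapper and the library path are made up for illustration:

#include <cstdint>

#include "absl/debugging/internal/symbolize.h"

// Tells the symbolizer that [start, end) was mapped from the given file at
// offset 0, e.g. for code mapped outside the normal dynamic-loader machinery.
bool RegisterPrelinkedRegion(const void* start, const void* end) {
  return absl::debugging_internal::RegisterFileMappingHint(
      start, end, /*offset=*/0, "/system/lib64/libexample.so");
}
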
diff --git a/abseil-cpp/absl/debugging/internal/vdso_support.cc b/abseil-cpp/absl/debugging/internal/vdso_support.cc
index 6be16d9..8a588ea 100644
--- a/abseil-cpp/absl/debugging/internal/vdso_support.cc
+++ b/abseil-cpp/absl/debugging/internal/vdso_support.cc
@@ -20,12 +20,25 @@
 
 #ifdef ABSL_HAVE_VDSO_SUPPORT     // defined in vdso_support.h
 
+#if !defined(__has_include)
+#define __has_include(header) 0
+#endif
+
 #include <errno.h>
 #include <fcntl.h>
+#if __has_include(<syscall.h>)
+#include <syscall.h>
+#elif __has_include(<sys/syscall.h>)
 #include <sys/syscall.h>
+#endif
 #include <unistd.h>
 
-#if __GLIBC_PREREQ(2, 16)  // GLIBC-2.16 implements getauxval.
+#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
+    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
+#define ABSL_HAVE_GETAUXVAL
+#endif
+
+#ifdef ABSL_HAVE_GETAUXVAL
 #include <sys/auxv.h>
 #endif
 
@@ -37,6 +50,17 @@
 #define AT_SYSINFO_EHDR 33  // for crosstoolv10
 #endif
 
+#if defined(__NetBSD__)
+using Elf32_auxv_t = Aux32Info;
+using Elf64_auxv_t = Aux64Info;
+#endif
+#if defined(__FreeBSD__)
+#if defined(__ELF_WORD_SIZE) && __ELF_WORD_SIZE == 64
+using Elf64_auxv_t = Elf64_Auxinfo;
+#endif
+using Elf32_auxv_t = Elf32_Auxinfo;
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace debugging_internal {
@@ -45,7 +69,9 @@
 std::atomic<const void *> VDSOSupport::vdso_base_(
     debugging_internal::ElfMemImage::kInvalidBase);
 
-std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+ABSL_CONST_INIT std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(
+    &InitAndGetCPU);
+
 VDSOSupport::VDSOSupport()
     // If vdso_base_ is still set to kInvalidBase, we got here
     // before VDSOSupport::Init has been called. Call it now.
@@ -65,7 +91,7 @@
 // the operation should be idempotent.
 const void *VDSOSupport::Init() {
   const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
-#if __GLIBC_PREREQ(2, 16)
+#ifdef ABSL_HAVE_GETAUXVAL
   if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
     errno = 0;
     const void *const sysinfo_ehdr =
@@ -74,7 +100,7 @@
       vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
     }
   }
-#endif  // __GLIBC_PREREQ(2, 16)
+#endif  // ABSL_HAVE_GETAUXVAL
   if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
     int fd = open("/proc/self/auxv", O_RDONLY);
     if (fd == -1) {
@@ -86,8 +112,13 @@
     ElfW(auxv_t) aux;
     while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
       if (aux.a_type == AT_SYSINFO_EHDR) {
+#if defined(__NetBSD__)
+        vdso_base_.store(reinterpret_cast<void *>(aux.a_v),
+                         std::memory_order_relaxed);
+#else
         vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
                          std::memory_order_relaxed);
+#endif
         break;
       }
     }
@@ -162,8 +193,9 @@
 ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
 int GetCPU() {
   unsigned cpu;
-  int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
-  return ret_code == 0 ? cpu : ret_code;
+  long ret_code =  // NOLINT(runtime/int)
+      (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
+  return ret_code == 0 ? static_cast<int>(cpu) : static_cast<int>(ret_code);
 }
 
 }  // namespace debugging_internal
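
A minimal sketch of the getauxval() path used above (Linux with a libc that ships <sys/auxv.h>); the helper name is hypothetical and error handling is simplified:

#include <elf.h>
#include <errno.h>
#include <sys/auxv.h>

// Returns the vDSO ELF header address from the auxiliary vector, or null.
const void* VdsoBaseOrNull() {
  errno = 0;
  unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);
  return (errno == 0 && ehdr != 0) ? reinterpret_cast<const void*>(ehdr)
                                   : nullptr;
}
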
diff --git a/abseil-cpp/absl/debugging/leak_check.cc b/abseil-cpp/absl/debugging/leak_check.cc
index ff90495..fdb8798 100644
--- a/abseil-cpp/absl/debugging/leak_check.cc
+++ b/abseil-cpp/absl/debugging/leak_check.cc
@@ -11,33 +11,39 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
+//
 // Wrappers around lsan_interface functions.
-// When lsan is not linked in, these functions are not available,
-// therefore Abseil code which depends on these functions is conditioned on the
-// definition of LEAK_SANITIZER.
+//
+// These are always-available run-time functions manipulating the LeakSanitizer,
+// even when the lsan_interface (and LeakSanitizer) is not available. When
+// LeakSanitizer is not linked in, these functions become no-op stubs.
+
 #include "absl/debugging/leak_check.h"
 
-#ifndef LEAK_SANITIZER
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
 
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-bool HaveLeakSanitizer() { return false; }
-void DoIgnoreLeak(const void*) { }
-void RegisterLivePointers(const void*, size_t) { }
-void UnRegisterLivePointers(const void*, size_t) { }
-LeakCheckDisabler::LeakCheckDisabler() { }
-LeakCheckDisabler::~LeakCheckDisabler() { }
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#else
+#if defined(ABSL_HAVE_LEAK_SANITIZER)
 
 #include <sanitizer/lsan_interface.h>
 
+#if ABSL_HAVE_ATTRIBUTE_WEAK
+extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off();
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 bool HaveLeakSanitizer() { return true; }
+
+#if ABSL_HAVE_ATTRIBUTE_WEAK
+bool LeakCheckerIsActive() {
+  return !(&__lsan_is_turned_off && __lsan_is_turned_off());
+}
+#else
+bool LeakCheckerIsActive() { return true; }
+#endif
+
+bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check(); }
 void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
 void RegisterLivePointers(const void* ptr, size_t size) {
   __lsan_register_root_region(ptr, size);
@@ -50,4 +56,18 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // LEAK_SANITIZER
+#else  // defined(ABSL_HAVE_LEAK_SANITIZER)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+bool HaveLeakSanitizer() { return false; }
+bool LeakCheckerIsActive() { return false; }
+void DoIgnoreLeak(const void*) { }
+void RegisterLivePointers(const void*, size_t) { }
+void UnRegisterLivePointers(const void*, size_t) { }
+LeakCheckDisabler::LeakCheckDisabler() = default;
+LeakCheckDisabler::~LeakCheckDisabler() = default;
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // defined(ABSL_HAVE_LEAK_SANITIZER)
diff --git a/abseil-cpp/absl/debugging/leak_check.h b/abseil-cpp/absl/debugging/leak_check.h
index 7a5a22d..6bd7940 100644
--- a/abseil-cpp/absl/debugging/leak_check.h
+++ b/abseil-cpp/absl/debugging/leak_check.h
@@ -24,7 +24,24 @@
 // Note: this leak checking API is not yet supported in MSVC.
 // Leak checking is enabled by default in all ASan builds.
 //
-// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer
+// is enabled. To use the mode, simply pass `-fsanitize=address` to both the
+// compiler and linker. An example Bazel command could be
+//
+//   $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ...
+//
+// GCC and Clang also support a standalone LeakSanitizer mode (a mode which does
+// not also use AddressSanitizer). To use the mode, simply pass
+// `-fsanitize=leak` to both the compiler and linker. Since GCC does not
+// currently provide a way of detecting this mode at compile-time, GCC users
+// must also pass -DLEAK_SANITIZER to the compiler. An example Bazel command
+// could be
+//
+//   $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak
+//     --linkopt=-fsanitize=leak ...
 //
 // -----------------------------------------------------------------------------
 #ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
@@ -43,6 +60,12 @@
 // currently built into this target.
 bool HaveLeakSanitizer();
 
+// LeakCheckerIsActive()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target and is turned on.
+bool LeakCheckerIsActive();
+
 // DoIgnoreLeak()
 //
 // Implements `IgnoreLeak()` below. This function should usually
@@ -62,7 +85,8 @@
 //
 // If the passed `ptr` does not point to an actively allocated object at the
 // time `IgnoreLeak()` is called, the call is a no-op; if it is actively
-// allocated, the object must not get deallocated later.
+// allocated, leak sanitizer will assume this object is referenced even if
+// there is no actual reference in user memory.
 //
 template <typename T>
 T* IgnoreLeak(T* ptr) {
@@ -70,6 +94,19 @@
   return ptr;
 }
 
+// FindAndReportLeaks()
+//
+// If any leaks are detected, prints a leak report and returns true.  This
+// function may be called repeatedly, and does not affect end-of-process leak
+// checking.
+//
+// Example:
+// if (FindAndReportLeaks()) {
+//   ... diagnostic already printed. Exit with failure code.
+//   exit(1)
+// }
+bool FindAndReportLeaks();
+
 // LeakCheckDisabler
 //
 // This helper class indicates that any heap allocations done in the code block
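
A hedged usage sketch combining the functions documented above; the helper names are hypothetical:

#include <string>

#include "absl/debugging/leak_check.h"

// Interned strings live for the whole process, so annotate them for LSan.
std::string* NewInternedString(const char* text) {
  auto* s = new std::string(text);
  if (absl::LeakCheckerIsActive()) {
    absl::IgnoreLeak(s);  // LSan treats *s as referenced from now on
  }
  return s;
}

void AllocateScratchWithoutLeakReports() {
  absl::LeakCheckDisabler disabler;  // allocations below are not reported
  new std::string("intentionally unreferenced");
}
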
diff --git a/abseil-cpp/absl/debugging/leak_check_fail_test.cc b/abseil-cpp/absl/debugging/leak_check_fail_test.cc
index c49b81a..46e9fb6 100644
--- a/abseil-cpp/absl/debugging/leak_check_fail_test.cc
+++ b/abseil-cpp/absl/debugging/leak_check_fail_test.cc
@@ -13,9 +13,10 @@
 // limitations under the License.
 
 #include <memory>
+
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/debugging/leak_check.h"
+#include "absl/log/log.h"
 
 namespace {
 
@@ -25,7 +26,7 @@
   // failed exit code.
 
   char* foo = strdup("lsan should complain about this leaked string");
-  ABSL_RAW_LOG(INFO, "Should detect leaked string %s", foo);
+  LOG(INFO) << "Should detect leaked string " << foo;
 }
 
 TEST(LeakCheckTest, LeakMemoryAfterDisablerScope) {
@@ -34,8 +35,7 @@
   // failed exit code.
   { absl::LeakCheckDisabler disabler; }
   char* foo = strdup("lsan should also complain about this leaked string");
-  ABSL_RAW_LOG(INFO, "Re-enabled leak detection.Should detect leaked string %s",
-               foo);
+  LOG(INFO) << "Re-enabled leak detection.Should detect leaked string " << foo;
 }
 
 }  // namespace
diff --git a/abseil-cpp/absl/debugging/leak_check_test.cc b/abseil-cpp/absl/debugging/leak_check_test.cc
index b5cc487..6f0135e 100644
--- a/abseil-cpp/absl/debugging/leak_check_test.cc
+++ b/abseil-cpp/absl/debugging/leak_check_test.cc
@@ -15,28 +15,27 @@
 #include <string>
 
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/base/config.h"
 #include "absl/debugging/leak_check.h"
+#include "absl/log/log.h"
 
 namespace {
 
-TEST(LeakCheckTest, DetectLeakSanitizer) {
-#ifdef ABSL_EXPECT_LEAK_SANITIZER
-  EXPECT_TRUE(absl::HaveLeakSanitizer());
-#else
-  EXPECT_FALSE(absl::HaveLeakSanitizer());
-#endif
-}
-
 TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
+  if (!absl::LeakCheckerIsActive()) {
+    GTEST_SKIP() << "LeakChecker is not active";
+  }
   auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
-  ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
+  LOG(INFO) << "Ignoring leaked string " << foo;
 }
 
 TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
+  if (!absl::LeakCheckerIsActive()) {
+    GTEST_SKIP() << "LeakChecker is not active";
+  }
   absl::LeakCheckDisabler disabler;
   auto foo = new std::string("some string leaked while checks are disabled");
-  ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
+  LOG(INFO) << "Ignoring leaked string " << foo;
 }
 
 }  // namespace
diff --git a/abseil-cpp/absl/debugging/stacktrace.cc b/abseil-cpp/absl/debugging/stacktrace.cc
index 1f7c7d8..ff8069f 100644
--- a/abseil-cpp/absl/debugging/stacktrace.cc
+++ b/abseil-cpp/absl/debugging/stacktrace.cc
@@ -49,8 +49,10 @@
 
 # include "absl/debugging/internal/stacktrace_aarch64-inl.inc"
 # include "absl/debugging/internal/stacktrace_arm-inl.inc"
+# include "absl/debugging/internal/stacktrace_emscripten-inl.inc"
 # include "absl/debugging/internal/stacktrace_generic-inl.inc"
 # include "absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# include "absl/debugging/internal/stacktrace_riscv-inl.inc"
 # include "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
 # include "absl/debugging/internal/stacktrace_win32-inl.inc"
 # include "absl/debugging/internal/stacktrace_x86-inl.inc"
diff --git a/abseil-cpp/absl/debugging/stacktrace_benchmark.cc b/abseil-cpp/absl/debugging/stacktrace_benchmark.cc
new file mode 100644
index 0000000..9360baf
--- /dev/null
+++ b/abseil-cpp/absl/debugging/stacktrace_benchmark.cc
@@ -0,0 +1,55 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/debugging/stacktrace.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+static constexpr int kMaxStackDepth = 100;
+static constexpr int kCacheSize = (1 << 16);
+void* pcs[kMaxStackDepth];
+
+ABSL_ATTRIBUTE_NOINLINE void func(benchmark::State& state, int x, int depth) {
+  if (x <= 0) {
+    // Touch a significant amount of memory so that the stack is unlikely to
+    // be cached in the L1 cache.
+    state.PauseTiming();
+    int* arr = new int[kCacheSize];
+    for (int i = 0; i < kCacheSize; ++i) benchmark::DoNotOptimize(arr[i] = 100);
+    delete[] arr;
+    state.ResumeTiming();
+    benchmark::DoNotOptimize(absl::GetStackTrace(pcs, depth, 0));
+    return;
+  }
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+  func(state, --x, depth);
+}
+
+void BM_GetStackTrace(benchmark::State& state) {
+  int depth = state.range(0);
+  for (auto s : state) {
+    func(state, depth, depth);
+  }
+}
+
+BENCHMARK(BM_GetStackTrace)->DenseRange(10, kMaxStackDepth, 10);
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/debugging/stacktrace_test.cc b/abseil-cpp/absl/debugging/stacktrace_test.cc
new file mode 100644
index 0000000..31f7723
--- /dev/null
+++ b/abseil-cpp/absl/debugging/stacktrace_test.cc
@@ -0,0 +1,47 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/stacktrace.h"
+
+#include "gtest/gtest.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+
+namespace {
+
+// This test is currently only known to pass on Linux x86_64/aarch64.
+#if defined(__linux__) && (defined(__x86_64__) || defined(__aarch64__))
+ABSL_ATTRIBUTE_NOINLINE void Unwind(void* p) {
+  ABSL_ATTRIBUTE_UNUSED static void* volatile sink = p;
+  constexpr int kSize = 16;
+  void* stack[kSize];
+  int frames[kSize];
+  absl::GetStackTrace(stack, kSize, 0);
+  absl::GetStackFrames(stack, frames, kSize, 0);
+}
+
+ABSL_ATTRIBUTE_NOINLINE void HugeFrame() {
+  char buffer[1 << 20];
+  Unwind(buffer);
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+TEST(StackTrace, HugeFrame) {
+  // Ensure that the unwinder is not confused by very large stack frames.
+  HugeFrame();
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+#endif
+
+}  // namespace
diff --git a/abseil-cpp/absl/debugging/symbolize.cc b/abseil-cpp/absl/debugging/symbolize.cc
index 5e4a25d..638d395 100644
--- a/abseil-cpp/absl/debugging/symbolize.cc
+++ b/abseil-cpp/absl/debugging/symbolize.cc
@@ -23,6 +23,11 @@
 #endif
 #endif
 
+// Emscripten symbolization relies on JS. Do not use them in standalone mode.
+#if defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
+#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM
+#endif
+
 #if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE)
 #include "absl/debugging/symbolize_elf.inc"
 #elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32)
@@ -31,6 +36,8 @@
 #include "absl/debugging/symbolize_win32.inc"
 #elif defined(__APPLE__)
 #include "absl/debugging/symbolize_darwin.inc"
+#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM)
+#include "absl/debugging/symbolize_emscripten.inc"
 #else
 #include "absl/debugging/symbolize_unimplemented.inc"
 #endif
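
A minimal sketch of how the symbolizer selected above is typically used together with absl::GetStackTrace; the function is illustrative and buffer sizes are arbitrary. Callers normally run absl::InitializeSymbolizer(argv[0]) once in main() first.

#include <cstdio>

#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

void PrintCurrentStack() {
  void* pcs[32];
  const int depth = absl::GetStackTrace(pcs, 32, /*skip_count=*/1);
  for (int i = 0; i < depth; ++i) {
    char name[256];
    if (absl::Symbolize(pcs[i], name, static_cast<int>(sizeof(name)))) {
      std::printf("%p  %s\n", pcs[i], name);
    } else {
      std::printf("%p  (unknown)\n", pcs[i]);
    }
  }
}
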
diff --git a/abseil-cpp/absl/debugging/symbolize_darwin.inc b/abseil-cpp/absl/debugging/symbolize_darwin.inc
index cdadd40..cf63d19 100644
--- a/abseil-cpp/absl/debugging/symbolize_darwin.inc
+++ b/abseil-cpp/absl/debugging/symbolize_darwin.inc
@@ -77,19 +77,20 @@
 
   char tmp_buf[1024];
   if (debugging_internal::Demangle(symbol.c_str(), tmp_buf, sizeof(tmp_buf))) {
-    int len = strlen(tmp_buf);
-    if (len + 1 <= out_size) {  // +1 for '\0'
+    size_t len = strlen(tmp_buf);
+    if (len + 1 <= static_cast<size_t>(out_size)) {  // +1 for '\0'
       assert(len < sizeof(tmp_buf));
       memmove(out, tmp_buf, len + 1);
     }
   } else {
-    strncpy(out, symbol.c_str(), out_size);
+    strncpy(out, symbol.c_str(), static_cast<size_t>(out_size));
   }
 
   if (out[out_size - 1] != '\0') {
     // strncpy() does not '\0' terminate when it truncates.
     static constexpr char kEllipsis[] = "...";
-    int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+    size_t ellipsis_size =
+        std::min(sizeof(kEllipsis) - 1, static_cast<size_t>(out_size) - 1);
     memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
     out[out_size - 1] = '\0';
   }
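
The hunk above keeps the existing truncation behavior while switching the arithmetic to size_t. As a standalone sketch of that behavior (hypothetical helper, assumes out_size > 0): when strncpy() fills the buffer without writing a terminator, the tail is overwritten with "..." and the buffer is explicitly NUL-terminated.

#include <algorithm>
#include <cstddef>
#include <cstring>

void CopyTruncatedWithEllipsis(char* out, size_t out_size, const char* symbol) {
  strncpy(out, symbol, out_size);
  if (out[out_size - 1] != '\0') {
    // strncpy() does not NUL-terminate when it truncates.
    static constexpr char kEllipsis[] = "...";
    const size_t ellipsis_size = std::min(sizeof(kEllipsis) - 1, out_size - 1);
    memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
    out[out_size - 1] = '\0';
  }
}
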
diff --git a/abseil-cpp/absl/debugging/symbolize_elf.inc b/abseil-cpp/absl/debugging/symbolize_elf.inc
index 7c36fd1..30638cb 100644
--- a/abseil-cpp/absl/debugging/symbolize_elf.inc
+++ b/abseil-cpp/absl/debugging/symbolize_elf.inc
@@ -77,6 +77,10 @@
 #include "absl/debugging/internal/vdso_support.h"
 #include "absl/strings/string_view.h"
 
+#if defined(__FreeBSD__) && !defined(ElfW)
+#define ElfW(x) __ElfN(x)
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
@@ -201,7 +205,8 @@
 
   // PT_LOAD program header describing executable code.
   // Normally we expect just one, but SWIFT binaries have two.
-  std::array<ElfW(Phdr), 2> phdr;
+  // CUDA binaries have 3 (see cr/473913254 description).
+  std::array<ElfW(Phdr), 4> phdr;
 };
 
 // Build 4-way associative cache for symbols. Within each cache line, symbols
@@ -248,21 +253,21 @@
  public:
   AddrMap() : size_(0), allocated_(0), obj_(nullptr) {}
   ~AddrMap() { base_internal::LowLevelAlloc::Free(obj_); }
-  int Size() const { return size_; }
-  ObjFile *At(int i) { return &obj_[i]; }
+  size_t Size() const { return size_; }
+  ObjFile *At(size_t i) { return &obj_[i]; }
   ObjFile *Add();
   void Clear();
 
  private:
-  int size_;       // count of valid elements (<= allocated_)
-  int allocated_;  // count of allocated elements
-  ObjFile *obj_;   // array of allocated_ elements
+  size_t size_;       // count of valid elements (<= allocated_)
+  size_t allocated_;  // count of allocated elements
+  ObjFile *obj_;      // array of allocated_ elements
   AddrMap(const AddrMap &) = delete;
   AddrMap &operator=(const AddrMap &) = delete;
 };
 
 void AddrMap::Clear() {
-  for (int i = 0; i != size_; i++) {
+  for (size_t i = 0; i != size_; i++) {
     At(i)->~ObjFile();
   }
   size_ = 0;
@@ -270,7 +275,7 @@
 
 ObjFile *AddrMap::Add() {
   if (size_ == allocated_) {
-    int new_allocated = allocated_ * 2 + 50;
+    size_t new_allocated = allocated_ * 2 + 50;
     ObjFile *new_obj_ =
         static_cast<ObjFile *>(base_internal::LowLevelAlloc::AllocWithArena(
             new_allocated * sizeof(*new_obj_), SigSafeArena()));
@@ -296,7 +301,7 @@
 
  private:
   char *CopyString(const char *s) {
-    int len = strlen(s);
+    size_t len = strlen(s);
     char *dst = static_cast<char *>(
         base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena()));
     ABSL_RAW_CHECK(dst != nullptr, "out of memory");
@@ -317,8 +322,9 @@
   FindSymbolResult GetSymbolFromObjectFile(const ObjFile &obj,
                                            const void *const pc,
                                            const ptrdiff_t relocation,
-                                           char *out, int out_size,
-                                           char *tmp_buf, int tmp_buf_size);
+                                           char *out, size_t out_size,
+                                           char *tmp_buf, size_t tmp_buf_size);
+  const char *GetUncachedSymbol(const void *pc);
 
   enum {
     SYMBOL_BUF_SIZE = 3072,
@@ -348,11 +354,11 @@
 
 }  // namespace
 
-static int SymbolizerSize() {
+static size_t SymbolizerSize() {
 #if defined(__wasm__) || defined(__asmjs__)
-  int pagesize = getpagesize();
+  auto pagesize = static_cast<size_t>(getpagesize());
 #else
-  int pagesize = sysconf(_SC_PAGESIZE);
+  auto pagesize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
 #endif
   return ((sizeof(Symbolizer) - 1) / pagesize + 1) * pagesize;
 }
@@ -424,7 +430,7 @@
     if (len == 0) {  // Reached EOF.
       break;
     }
-    num_bytes += len;
+    num_bytes += static_cast<size_t>(len);
   }
   SAFE_ASSERT(num_bytes <= count);
   return static_cast<ssize_t>(num_bytes);
@@ -437,8 +443,8 @@
                               const off_t offset) {
   off_t off = lseek(fd, offset, SEEK_SET);
   if (off == (off_t)-1) {
-    ABSL_RAW_LOG(WARNING, "lseek(%d, %ju, SEEK_SET) failed: errno=%d", fd,
-                 static_cast<uintmax_t>(offset), errno);
+    ABSL_RAW_LOG(WARNING, "lseek(%d, %jd, SEEK_SET) failed: errno=%d", fd,
+                 static_cast<intmax_t>(offset), errno);
     return -1;
   }
   return ReadPersistent(fd, buf, count);
@@ -473,29 +479,37 @@
 // inlined.
 static ABSL_ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(
     const int fd, ElfW(Half) sh_num, const off_t sh_offset, ElfW(Word) type,
-    ElfW(Shdr) * out, char *tmp_buf, int tmp_buf_size) {
+    ElfW(Shdr) * out, char *tmp_buf, size_t tmp_buf_size) {
   ElfW(Shdr) *buf = reinterpret_cast<ElfW(Shdr) *>(tmp_buf);
-  const int buf_entries = tmp_buf_size / sizeof(buf[0]);
-  const int buf_bytes = buf_entries * sizeof(buf[0]);
+  const size_t buf_entries = tmp_buf_size / sizeof(buf[0]);
+  const size_t buf_bytes = buf_entries * sizeof(buf[0]);
 
-  for (int i = 0; i < sh_num;) {
-    const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
-    const ssize_t num_bytes_to_read =
+  for (size_t i = 0; static_cast<int>(i) < sh_num;) {
+    const size_t num_bytes_left =
+        (static_cast<size_t>(sh_num) - i) * sizeof(buf[0]);
+    const size_t num_bytes_to_read =
         (buf_bytes > num_bytes_left) ? num_bytes_left : buf_bytes;
-    const off_t offset = sh_offset + i * sizeof(buf[0]);
+    const off_t offset = sh_offset + static_cast<off_t>(i * sizeof(buf[0]));
     const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read, offset);
-    if (len % sizeof(buf[0]) != 0) {
+    if (len < 0) {
       ABSL_RAW_LOG(
           WARNING,
-          "Reading %zd bytes from offset %ju returned %zd which is not a "
+          "Reading %zu bytes from offset %ju returned %zd which is negative.",
+          num_bytes_to_read, static_cast<intmax_t>(offset), len);
+      return false;
+    }
+    if (static_cast<size_t>(len) % sizeof(buf[0]) != 0) {
+      ABSL_RAW_LOG(
+          WARNING,
+          "Reading %zu bytes from offset %jd returned %zd which is not a "
           "multiple of %zu.",
-          num_bytes_to_read, static_cast<uintmax_t>(offset), len,
+          num_bytes_to_read, static_cast<intmax_t>(offset), len,
           sizeof(buf[0]));
       return false;
     }
-    const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+    const size_t num_headers_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
     SAFE_ASSERT(num_headers_in_buf <= buf_entries);
-    for (int j = 0; j < num_headers_in_buf; ++j) {
+    for (size_t j = 0; j < num_headers_in_buf; ++j) {
       if (buf[j].sh_type == type) {
         *out = buf[j];
         return true;
@@ -518,9 +532,14 @@
     return false;
   }
 
+  // Technically it can be larger, but in practice this never happens.
+  if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+    return false;
+  }
+
   ElfW(Shdr) shstrtab;
-  off_t shstrtab_offset =
-      (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+  off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
+                          elf_header.e_shentsize * elf_header.e_shstrndx;
   if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
     return false;
   }
@@ -528,22 +547,23 @@
   for (int i = 0; i < elf_header.e_shnum; ++i) {
     ElfW(Shdr) out;
     off_t section_header_offset =
-        (elf_header.e_shoff + elf_header.e_shentsize * i);
+        static_cast<off_t>(elf_header.e_shoff) + elf_header.e_shentsize * i;
     if (!ReadFromOffsetExact(fd, &out, sizeof(out), section_header_offset)) {
       return false;
     }
-    off_t name_offset = shstrtab.sh_offset + out.sh_name;
+    off_t name_offset = static_cast<off_t>(shstrtab.sh_offset) + out.sh_name;
     char header_name[kMaxSectionNameLen];
     ssize_t n_read =
         ReadFromOffset(fd, &header_name, kMaxSectionNameLen, name_offset);
-    if (n_read == -1) {
+    if (n_read < 0) {
       return false;
     } else if (n_read > kMaxSectionNameLen) {
       // Long read?
       return false;
     }
 
-    absl::string_view name(header_name, strnlen(header_name, n_read));
+    absl::string_view name(header_name,
+                           strnlen(header_name, static_cast<size_t>(n_read)));
     if (!callback(name, out)) {
       break;
     }
@@ -569,20 +589,25 @@
     return false;
   }
 
+  // Technically it can be larger, but in practice this never happens.
+  if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+    return false;
+  }
+
   ElfW(Shdr) shstrtab;
-  off_t shstrtab_offset =
-      (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+  off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
+                          elf_header.e_shentsize * elf_header.e_shstrndx;
   if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
     return false;
   }
 
   for (int i = 0; i < elf_header.e_shnum; ++i) {
     off_t section_header_offset =
-        (elf_header.e_shoff + elf_header.e_shentsize * i);
+        static_cast<off_t>(elf_header.e_shoff) + elf_header.e_shentsize * i;
     if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
       return false;
     }
-    off_t name_offset = shstrtab.sh_offset + out->sh_name;
+    off_t name_offset = static_cast<off_t>(shstrtab.sh_offset) + out->sh_name;
     ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
     if (n_read < 0) {
       return false;
@@ -633,17 +658,19 @@
 }
 
 // Return true if an address is inside a section.
-static bool InSection(const void *address, const ElfW(Shdr) * section) {
-  const char *start = reinterpret_cast<const char *>(section->sh_addr);
+static bool InSection(const void *address, ptrdiff_t relocation,
+                      const ElfW(Shdr) * section) {
+  const char *start = reinterpret_cast<const char *>(
+      section->sh_addr + static_cast<ElfW(Addr)>(relocation));
   size_t size = static_cast<size_t>(section->sh_size);
   return start <= address && address < (start + size);
 }
 
 static const char *ComputeOffset(const char *base, ptrdiff_t offset) {
-  // Note: cast to uintptr_t to avoid undefined behavior when base evaluates to
+  // Note: cast to intptr_t to avoid undefined behavior when base evaluates to
   // zero and offset is non-zero.
-  return reinterpret_cast<const char *>(
-      reinterpret_cast<uintptr_t>(base) + offset);
+  return reinterpret_cast<const char *>(reinterpret_cast<intptr_t>(base) +
+                                        offset);
 }
 
 // Read a symbol table and look for the symbol containing the
@@ -656,42 +683,45 @@
 // To keep stack consumption low, we would like this function to not get
 // inlined.
 static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
-    const void *const pc, const int fd, char *out, int out_size,
+    const void *const pc, const int fd, char *out, size_t out_size,
     ptrdiff_t relocation, const ElfW(Shdr) * strtab, const ElfW(Shdr) * symtab,
-    const ElfW(Shdr) * opd, char *tmp_buf, int tmp_buf_size) {
+    const ElfW(Shdr) * opd, char *tmp_buf, size_t tmp_buf_size) {
   if (symtab == nullptr) {
     return SYMBOL_NOT_FOUND;
   }
 
   // Read multiple symbols at once to save read() calls.
   ElfW(Sym) *buf = reinterpret_cast<ElfW(Sym) *>(tmp_buf);
-  const int buf_entries = tmp_buf_size / sizeof(buf[0]);
+  const size_t buf_entries = tmp_buf_size / sizeof(buf[0]);
 
-  const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+  const size_t num_symbols = symtab->sh_size / symtab->sh_entsize;
 
   // On platforms using an .opd section (PowerPC & IA64), a function symbol
   // has the address of a function descriptor, which contains the real
   // starting address.  However, we do not always want to use the real
   // starting address because we sometimes want to symbolize a function
   // pointer into the .opd section, e.g. FindSymbol(&foo,...).
-  const bool pc_in_opd =
-      kPlatformUsesOPDSections && opd != nullptr && InSection(pc, opd);
+  const bool pc_in_opd = kPlatformUsesOPDSections && opd != nullptr &&
+                         InSection(pc, relocation, opd);
   const bool deref_function_descriptor_pointer =
       kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd;
 
   ElfW(Sym) best_match;
   SafeMemZero(&best_match, sizeof(best_match));
   bool found_match = false;
-  for (int i = 0; i < num_symbols;) {
-    off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
-    const int num_remaining_symbols = num_symbols - i;
-    const int entries_in_chunk = std::min(num_remaining_symbols, buf_entries);
-    const int bytes_in_chunk = entries_in_chunk * sizeof(buf[0]);
+  for (size_t i = 0; i < num_symbols;) {
+    off_t offset =
+        static_cast<off_t>(symtab->sh_offset + i * symtab->sh_entsize);
+    const size_t num_remaining_symbols = num_symbols - i;
+    const size_t entries_in_chunk =
+        std::min(num_remaining_symbols, buf_entries);
+    const size_t bytes_in_chunk = entries_in_chunk * sizeof(buf[0]);
     const ssize_t len = ReadFromOffset(fd, buf, bytes_in_chunk, offset);
-    SAFE_ASSERT(len % sizeof(buf[0]) == 0);
-    const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+    SAFE_ASSERT(len >= 0);
+    SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
+    const size_t num_symbols_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
     SAFE_ASSERT(num_symbols_in_buf <= entries_in_chunk);
-    for (int j = 0; j < num_symbols_in_buf; ++j) {
+    for (size_t j = 0; j < num_symbols_in_buf; ++j) {
       const ElfW(Sym) &symbol = buf[j];
 
       // For a DSO, a symbol address is relocated by the loading address.
@@ -701,8 +731,18 @@
       const char *start_address =
           ComputeOffset(original_start_address, relocation);
 
+#ifdef __arm__
+      // ARM functions are always aligned to multiples of two bytes; the
+      // lowest-order bit in start_address is ignored by the CPU and indicates
+      // whether the function contains ARM (0) or Thumb (1) code. We don't care
+      // about what encoding is being used; we just want the real start address
+      // of the function.
+      start_address = reinterpret_cast<const char *>(
+          reinterpret_cast<uintptr_t>(start_address) & ~1u);
+#endif
+
       if (deref_function_descriptor_pointer &&
-          InSection(original_start_address, opd)) {
+          InSection(original_start_address, /*relocation=*/0, opd)) {
         // The opd section is mapped into memory.  Just dereference
         // start_address to get the first double word, which points to the
         // function entry.
@@ -711,7 +751,8 @@
 
       // If pc is inside the .opd section, it points to a function descriptor.
       const size_t size = pc_in_opd ? kFunctionDescriptorSize : symbol.st_size;
-      const void *const end_address = ComputeOffset(start_address, size);
+      const void *const end_address =
+          ComputeOffset(start_address, static_cast<ptrdiff_t>(size));
       if (symbol.st_value != 0 &&  // Skip null value symbols.
           symbol.st_shndx != 0 &&  // Skip undefined symbols.
 #ifdef STT_TLS
@@ -729,16 +770,18 @@
   }
 
   if (found_match) {
-    const size_t off = strtab->sh_offset + best_match.st_name;
+    const off_t off =
+        static_cast<off_t>(strtab->sh_offset) + best_match.st_name;
     const ssize_t n_read = ReadFromOffset(fd, out, out_size, off);
     if (n_read <= 0) {
       // This should never happen.
       ABSL_RAW_LOG(WARNING,
-                   "Unable to read from fd %d at offset %zu: n_read = %zd", fd,
-                   off, n_read);
+                   "Unable to read from fd %d at offset %lld: n_read = %zd", fd,
+                   static_cast<long long>(off), n_read);
       return SYMBOL_NOT_FOUND;
     }
-    ABSL_RAW_CHECK(n_read <= out_size, "ReadFromOffset read too much data.");
+    ABSL_RAW_CHECK(static_cast<size_t>(n_read) <= out_size,
+                   "ReadFromOffset read too much data.");
 
     // strtab->sh_offset points into .strtab-like section that contains
     // NUL-terminated strings: '\0foo\0barbaz\0...".
@@ -746,7 +789,7 @@
     // sh_offset+st_name points to the start of symbol name, but we don't know
     // how long the symbol is, so we try to read as much as we have space for,
     // and usually over-read (i.e. there is a NUL somewhere before n_read).
-    if (memchr(out, '\0', n_read) == nullptr) {
+    if (memchr(out, '\0', static_cast<size_t>(n_read)) == nullptr) {
       // Either out_size was too small (n_read == out_size and no NUL), or
       // we tried to read past the EOF (n_read < out_size) and .strtab is
       // corrupt (missing terminating NUL; should never happen for valid ELF).
@@ -764,7 +807,7 @@
 // See FindSymbol() comment for description of return value.
 FindSymbolResult Symbolizer::GetSymbolFromObjectFile(
     const ObjFile &obj, const void *const pc, const ptrdiff_t relocation,
-    char *out, int out_size, char *tmp_buf, int tmp_buf_size) {
+    char *out, size_t out_size, char *tmp_buf, size_t tmp_buf_size) {
   ElfW(Shdr) symtab;
   ElfW(Shdr) strtab;
   ElfW(Shdr) opd;
@@ -787,13 +830,15 @@
   // Consult a regular symbol table, then fall back to the dynamic symbol table.
   for (const auto symbol_table_type : {SHT_SYMTAB, SHT_DYNSYM}) {
     if (!GetSectionHeaderByType(obj.fd, obj.elf_header.e_shnum,
-                                obj.elf_header.e_shoff, symbol_table_type,
+                                static_cast<off_t>(obj.elf_header.e_shoff),
+                                static_cast<ElfW(Word)>(symbol_table_type),
                                 &symtab, tmp_buf, tmp_buf_size)) {
       continue;
     }
     if (!ReadFromOffsetExact(
             obj.fd, &strtab, sizeof(strtab),
-            obj.elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
+            static_cast<off_t>(obj.elf_header.e_shoff +
+                               symtab.sh_link * sizeof(symtab)))) {
       continue;
     }
     const FindSymbolResult rc =
@@ -818,7 +863,7 @@
 
   ~FileDescriptor() {
     if (fd_ >= 0) {
-      NO_INTR(close(fd_));
+      close(fd_);
     }
   }
 
@@ -835,7 +880,7 @@
 // and snprintf().
 class LineReader {
  public:
-  explicit LineReader(int fd, char *buf, int buf_len)
+  explicit LineReader(int fd, char *buf, size_t buf_len)
       : fd_(fd),
         buf_len_(buf_len),
         buf_(buf),
@@ -863,12 +908,12 @@
       bol_ = eol_ + 1;            // Advance to the next line in the buffer.
       SAFE_ASSERT(bol_ <= eod_);  // "bol_" can point to "eod_".
       if (!HasCompleteLine()) {
-        const int incomplete_line_length = eod_ - bol_;
+        const auto incomplete_line_length = static_cast<size_t>(eod_ - bol_);
         // Move the trailing incomplete line to the beginning.
         memmove(buf_, bol_, incomplete_line_length);
         // Read text from file and append it.
         char *const append_pos = buf_ + incomplete_line_length;
-        const int capacity_left = buf_len_ - incomplete_line_length;
+        const size_t capacity_left = buf_len_ - incomplete_line_length;
         const ssize_t num_bytes =
             ReadPersistent(fd_, append_pos, capacity_left);
         if (num_bytes <= 0) {  // EOF or error.
@@ -891,7 +936,8 @@
 
  private:
   char *FindLineFeed() const {
-    return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+    return reinterpret_cast<char *>(
+        memchr(bol_, '\n', static_cast<size_t>(eod_ - bol_)));
   }
 
   bool BufferIsEmpty() const { return buf_ == eod_; }
@@ -901,7 +947,7 @@
   }
 
   const int fd_;
-  const int buf_len_;
+  const size_t buf_len_;
   char *const buf_;
   char *bol_;
   char *eol_;
@@ -919,7 +965,8 @@
     int ch = *p;
     if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') ||
         (ch >= 'a' && ch <= 'f')) {
-      hex = (hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+      hex = (hex << 4) |
+            static_cast<uint64_t>(ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
     } else {  // Encountered the first non-hex character.
       break;
     }
@@ -951,7 +998,7 @@
 static ABSL_ATTRIBUTE_NOINLINE bool ReadAddrMap(
     bool (*callback)(const char *filename, const void *const start_addr,
                      const void *const end_addr, uint64_t offset, void *arg),
-    void *arg, void *tmp_buf, int tmp_buf_size) {
+    void *arg, void *tmp_buf, size_t tmp_buf_size) {
   // Use /proc/self/task/<pid>/maps instead of /proc/self/maps. The latter
   // requires the kernel to stop all threads, and is significantly slower when there
   // are 1000s of threads.
@@ -1066,10 +1113,10 @@
       }
     }
 
-    int lo = 0;
-    int hi = addr_map_.Size();
+    size_t lo = 0;
+    size_t hi = addr_map_.Size();
     while (lo < hi) {
-      int mid = (lo + hi) / 2;
+      size_t mid = (lo + hi) / 2;
       if (addr < addr_map_.At(mid)->end_addr) {
         hi = mid;
       } else {
@@ -1091,11 +1138,11 @@
 }
 
 void Symbolizer::ClearAddrMap() {
-  for (int i = 0; i != addr_map_.Size(); i++) {
+  for (size_t i = 0; i != addr_map_.Size(); i++) {
     ObjFile *o = addr_map_.At(i);
     base_internal::LowLevelAlloc::Free(o->filename);
     if (o->fd >= 0) {
-      NO_INTR(close(o->fd));
+      close(o->fd);
     }
   }
   addr_map_.Clear();
@@ -1111,7 +1158,7 @@
 
   // Files are supposed to be added in the increasing address order.  Make
   // sure that's the case.
-  int addr_map_size = impl->addr_map_.Size();
+  size_t addr_map_size = impl->addr_map_.Size();
   if (addr_map_size != 0) {
     ObjFile *old = impl->addr_map_.At(addr_map_size - 1);
     if (old->end_addr > end_addr) {
@@ -1131,6 +1178,14 @@
                      reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
       }
       return true;
+    } else if (old->end_addr == start_addr &&
+               reinterpret_cast<uintptr_t>(old->start_addr) - old->offset ==
+                   reinterpret_cast<uintptr_t>(start_addr) - offset &&
+               strcmp(old->filename, filename) == 0) {
+      // Two contiguous map entries that span a contiguous region of the file,
+      // perhaps because some part of the file was mlock()ed. Combine them.
+      old->end_addr = end_addr;
+      return true;
     }
   }
   ObjFile *obj = impl->addr_map_.Add();
@@ -1147,12 +1202,12 @@
 // where the input symbol is demangled in-place.
 // To keep stack consumption low, we would like this function to not
 // get inlined.
-static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size,
+static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, size_t out_size,
                                                     char *tmp_buf,
-                                                    int tmp_buf_size) {
+                                                    size_t tmp_buf_size) {
   if (Demangle(out, tmp_buf, tmp_buf_size)) {
     // Demangling succeeded. Copy to out if the space allows.
-    int len = strlen(tmp_buf);
+    size_t len = strlen(tmp_buf);
     if (len + 1 <= out_size) {  // +1 for '\0'.
       SAFE_ASSERT(len < tmp_buf_size);
       memmove(out, tmp_buf, len + 1);
@@ -1195,7 +1250,8 @@
 
   SymbolCacheLine *line = GetCacheLine(pc);
   uint32_t max_age = 0;
-  int oldest_index = -1;
+  size_t oldest_index = 0;
+  bool found_oldest_index = false;
   for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) {
     if (line->pc[i] == nullptr) {
       AgeSymbols(line);
@@ -1207,11 +1263,12 @@
     if (line->age[i] >= max_age) {
       max_age = line->age[i];
       oldest_index = i;
+      found_oldest_index = true;
     }
   }
 
   AgeSymbols(line);
-  ABSL_RAW_CHECK(oldest_index >= 0, "Corrupt cache");
+  ABSL_RAW_CHECK(found_oldest_index, "Corrupt cache");
   base_internal::LowLevelAlloc::Free(line->name[oldest_index]);
   line->pc[oldest_index] = pc;
   line->name[oldest_index] = CopyString(name);
@@ -1280,8 +1337,8 @@
     }
     const int phnum = obj->elf_header.e_phnum;
     const int phentsize = obj->elf_header.e_phentsize;
-    size_t phoff = obj->elf_header.e_phoff;
-    int num_executable_load_segments = 0;
+    auto phoff = static_cast<off_t>(obj->elf_header.e_phoff);
+    size_t num_interesting_load_segments = 0;
     for (int j = 0; j < phnum; j++) {
       ElfW(Phdr) phdr;
       if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) {
@@ -1290,22 +1347,35 @@
         return false;
       }
       phoff += phentsize;
-      constexpr int rx = PF_X | PF_R;
-      if (phdr.p_type != PT_LOAD || (phdr.p_flags & rx) != rx) {
-        // Not a LOAD segment, or not executable code.
+
+#if defined(__powerpc__) && !(_CALL_ELF > 1)
+      // On the PowerPC ELF v1 ABI, function pointers actually point to function
+      // descriptors. These descriptors are stored in an .opd section, which is
+      // mapped read-only. We thus need to look at all readable segments, not
+      // just the executable ones.
+      constexpr int interesting = PF_R;
+#else
+      constexpr int interesting = PF_X | PF_R;
+#endif
+
+      if (phdr.p_type != PT_LOAD
+          || (phdr.p_flags & interesting) != interesting) {
+        // Not a LOAD segment, not executable code, and not a function
+        // descriptor.
         continue;
       }
-      if (num_executable_load_segments < obj->phdr.size()) {
-        memcpy(&obj->phdr[num_executable_load_segments++], &phdr, sizeof(phdr));
+      if (num_interesting_load_segments < obj->phdr.size()) {
+        memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, sizeof(phdr));
       } else {
-        ABSL_RAW_LOG(WARNING, "%s: too many executable LOAD segments",
-                     obj->filename);
+        ABSL_RAW_LOG(
+            WARNING, "%s: too many interesting LOAD segments: %zu >= %zu",
+            obj->filename, num_interesting_load_segments, obj->phdr.size());
         break;
       }
     }
-    if (num_executable_load_segments == 0) {
-      // This object has no "r-x" LOAD segments. That's unexpected.
-      ABSL_RAW_LOG(WARNING, "%s: no executable LOAD segments", obj->filename);
+    if (num_interesting_load_segments == 0) {
+      // This object has no interesting LOAD segments. That's unexpected.
+      ABSL_RAW_LOG(WARNING, "%s: no interesting LOAD segments", obj->filename);
       return false;
     }
   }
@@ -1319,13 +1389,7 @@
 // they are called here as well.
 // To keep stack consumption low, we would like this function to not
 // get inlined.
-const char *Symbolizer::GetSymbol(const void *const pc) {
-  const char *entry = FindSymbolInCache(pc);
-  if (entry != nullptr) {
-    return entry;
-  }
-  symbol_buf_[0] = '\0';
-
+const char *Symbolizer::GetUncachedSymbol(const void *pc) {
   ObjFile *const obj = FindObjFile(pc, 1);
   ptrdiff_t relocation = 0;
   int fd = -1;
@@ -1337,12 +1401,12 @@
         //
         // For obj->offset > 0, adjust the relocation since a mapping at offset
         // X in the file will have a start address of [true relocation]+X.
-        relocation = start_addr - obj->offset;
+        relocation = static_cast<ptrdiff_t>(start_addr - obj->offset);
 
-        // Note: some binaries have multiple "rx" LOAD segments. We must
-        // find the right one.
+        // Note: some binaries have multiple LOAD segments that can contain
+        // function pointers. We must find the right one.
         ElfW(Phdr) *phdr = nullptr;
-        for (int j = 0; j < obj->phdr.size(); j++) {
+        for (size_t j = 0; j < obj->phdr.size(); j++) {
           ElfW(Phdr) &p = obj->phdr[j];
           if (p.p_type != PT_LOAD) {
             // We only expect PT_LOADs. This must be PT_NULL that we didn't
@@ -1350,7 +1414,7 @@
             ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type");
             break;
           }
-          if (pc < reinterpret_cast<void *>(start_addr + p.p_memsz)) {
+          if (pc < reinterpret_cast<void *>(start_addr + p.p_vaddr + p.p_memsz)) {
             phdr = &p;
             break;
           }
@@ -1413,6 +1477,42 @@
   return InsertSymbolInCache(pc, symbol_buf_);
 }
 
+const char *Symbolizer::GetSymbol(const void *pc) {
+  const char *entry = FindSymbolInCache(pc);
+  if (entry != nullptr) {
+    return entry;
+  }
+  symbol_buf_[0] = '\0';
+
+#ifdef __hppa__
+  {
+    // In some contexts (e.g., return addresses), PA-RISC uses the lowest two
+    // bits of the address to indicate the privilege level. Clear those bits
+    // before trying to symbolize.
+    const auto pc_bits = reinterpret_cast<uintptr_t>(pc);
+    const auto address = pc_bits & ~0x3;
+    entry = GetUncachedSymbol(reinterpret_cast<const void *>(address));
+    if (entry != nullptr) {
+      return entry;
+    }
+
+    // In some contexts, PA-RISC also uses bit 1 of the address to indicate that
+    // this is a cross-DSO function pointer. Such function pointers actually
+    // point to a procedure label, a struct whose first 32-bit (pointer) element
+    // actually points to the function text. With no symbol found for this
+    // address so far, try interpreting it as a cross-DSO function pointer and
+    // see how that goes.
+    if (pc_bits & 0x2) {
+      return GetUncachedSymbol(*reinterpret_cast<const void *const *>(address));
+    }
+
+    return nullptr;
+  }
+#else
+  return GetUncachedSymbol(pc);
+#endif
+}
+
 bool RemoveAllSymbolDecorators(void) {
   if (!g_decorators_mu.TryLock()) {
     // Someone else is using decorators. Get out.
@@ -1476,7 +1576,7 @@
     ret = false;
   } else {
     // TODO(ckennelly): Move this into a string copy routine.
-    int len = strlen(filename);
+    size_t len = strlen(filename);
     char *dst = static_cast<char *>(
         base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena()));
     ABSL_RAW_CHECK(dst != nullptr, "out of memory");
@@ -1532,16 +1632,17 @@
   const char *name = s->GetSymbol(pc);
   bool ok = false;
   if (name != nullptr && out_size > 0) {
-    strncpy(out, name, out_size);
+    strncpy(out, name, static_cast<size_t>(out_size));
     ok = true;
-    if (out[out_size - 1] != '\0') {
+    if (out[static_cast<size_t>(out_size) - 1] != '\0') {
       // strncpy() does not '\0' terminate when it truncates.  Do so, with
       // trailing ellipsis.
       static constexpr char kEllipsis[] = "...";
-      int ellipsis_size =
-          std::min(implicit_cast<int>(strlen(kEllipsis)), out_size - 1);
-      memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
-      out[out_size - 1] = '\0';
+      size_t ellipsis_size =
+          std::min(strlen(kEllipsis), static_cast<size_t>(out_size) - 1);
+      memcpy(out + static_cast<size_t>(out_size) - ellipsis_size - 1, kEllipsis,
+             ellipsis_size);
+      out[static_cast<size_t>(out_size) - 1] = '\0';
     }
   }
   debugging_internal::FreeSymbolizer(s);
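Among the signedness changes above, FindObjFile's lookup over addr_map_ now runs its binary search with size_t indices. Stripped of the low-level allocator, the search is essentially the sketch below (Obj and LookupObject are hypothetical illustrations of the same lower-bound-by-end-address idea, not Abseil types).

#include <cstddef>

struct Obj {
  const void* start_addr;
  const void* end_addr;
};

// Hypothetical lookup: entries are sorted by end_addr; find the first entry
// whose end_addr lies above addr, then confirm that it actually covers addr.
const Obj* LookupObject(const Obj* objs, std::size_t count, const void* addr) {
  std::size_t lo = 0;
  std::size_t hi = count;
  while (lo < hi) {
    const std::size_t mid = (lo + hi) / 2;
    if (addr < objs[mid].end_addr) {
      hi = mid;  // addr may fall inside objs[mid]; keep searching left.
    } else {
      lo = mid + 1;
    }
  }
  if (lo == count) return nullptr;
  return addr >= objs[lo].start_addr ? &objs[lo] : nullptr;
}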
diff --git a/abseil-cpp/absl/debugging/symbolize_emscripten.inc b/abseil-cpp/absl/debugging/symbolize_emscripten.inc
new file mode 100644
index 0000000..a0f344d
--- /dev/null
+++ b/abseil-cpp/absl/debugging/symbolize_emscripten.inc
@@ -0,0 +1,75 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cxxabi.h>
+#include <emscripten.h>
+
+#include <algorithm>
+#include <cstring>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/internal/demangle.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+extern "C" {
+const char* emscripten_pc_get_function(const void* pc);
+}
+
+// clang-format off
+EM_JS(bool, HaveOffsetConverter, (),
+      { return typeof wasmOffsetConverter !== 'undefined'; });
+// clang-format on
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+void InitializeSymbolizer(const char*) {
+  if (!HaveOffsetConverter()) {
+    ABSL_RAW_LOG(INFO,
+                 "Symbolization unavailable. Rebuild with -sWASM=1 "
+                 "and -sUSE_OFFSET_CONVERTER=1.");
+  }
+}
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+  // Check if we have the offset converter necessary for pc_get_function.
+  // Without it, the program will abort().
+  if (!HaveOffsetConverter()) {
+    return false;
+  }
+  if (pc == nullptr || out_size <= 0) {
+    return false;
+  }
+  const char* func_name = emscripten_pc_get_function(pc);
+  if (func_name == nullptr) {
+    return false;
+  }
+
+  strncpy(out, func_name, out_size);
+
+  if (out[out_size - 1] != '\0') {
+    // strncpy() does not '\0' terminate when it truncates.
+    static constexpr char kEllipsis[] = "...";
+    int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+    memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+    out[out_size - 1] = '\0';
+  }
+
+  return true;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
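The Emscripten backend above only does useful work once absl::InitializeSymbolizer has run and the offset converter is available. A minimal sketch of the usual wiring, following the pattern in the test further below (the main body here is illustrative, not part of the patch):

#include "absl/debugging/symbolize.h"

int main(int argc, char** argv) {
  // Pass argv[0] so the platform-specific symbolizer can set itself up; on
  // Emscripten this also logs a hint if the offset converter is missing.
  absl::InitializeSymbolizer(argv[0]);
  // ... application code; absl::Symbolize(pc, buf, size) is now usable ...
  return 0;
}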
diff --git a/abseil-cpp/absl/debugging/symbolize_test.cc b/abseil-cpp/absl/debugging/symbolize_test.cc
index a2dd495..d0feab2 100644
--- a/abseil-cpp/absl/debugging/symbolize_test.cc
+++ b/abseil-cpp/absl/debugging/symbolize_test.cc
@@ -14,6 +14,10 @@
 
 #include "absl/debugging/symbolize.h"
 
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
 #ifndef _WIN32
 #include <fcntl.h>
 #include <sys/mman.h>
@@ -29,12 +33,17 @@
 #include "absl/base/casts.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/per_thread_tls.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/optimization.h"
 #include "absl/debugging/internal/stack_consumption.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/string_view.h"
 
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
 using testing::Contains;
 
 #ifdef _WIN32
@@ -81,21 +90,13 @@
   return 0;
 }
 
-int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.hot) hot_func() {
-  return 0;
-}
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.hot) hot_func() { return 0; }
 
-int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.startup) startup_func() {
-  return 0;
-}
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.startup) startup_func() { return 0; }
 
-int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.exit) exit_func() {
-  return 0;
-}
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.exit) exit_func() { return 0; }
 
-int /*ABSL_ATTRIBUTE_SECTION_VARIABLE(.text)*/ regular_func() {
-  return 0;
-}
+int /*ABSL_ATTRIBUTE_SECTION_VARIABLE(.text)*/ regular_func() { return 0; }
 
 // Thread-local data may confuse the symbolizer; ensure that it does not.
 // Variable sizes and order are important.
@@ -106,6 +107,8 @@
 #endif
 
 #if !defined(__EMSCRIPTEN__)
+static void *GetPCFromFnPtr(void *ptr) { return ptr; }
+
 // Used below to hopefully inhibit some compiler/linker optimizations
 // that may remove kHpageTextPadding, kPadding0, and kPadding1 from
 // the binary.
@@ -114,7 +117,14 @@
 // Force the binary to be large enough that a THP .text remap will succeed.
 static constexpr size_t kHpageSize = 1 << 21;
 const char kHpageTextPadding[kHpageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(
-    .text) = "";
+        .text) = "";
+
+#else
+static void *GetPCFromFnPtr(void *ptr) {
+  return EM_ASM_PTR(
+      { return wasmOffsetConverter.convert(wasmTable.get($0).name, 0); }, ptr);
+}
+
 #endif  // !defined(__EMSCRIPTEN__)
 
 static char try_symbolize_buffer[4096];
@@ -124,15 +134,17 @@
 // absl::Symbolize() returns false, otherwise returns try_symbolize_buffer with
 // the result of absl::Symbolize().
 static const char *TrySymbolizeWithLimit(void *pc, int limit) {
-  ABSL_RAW_CHECK(limit <= sizeof(try_symbolize_buffer),
-                 "try_symbolize_buffer is too small");
+  CHECK_LE(limit, sizeof(try_symbolize_buffer))
+      << "try_symbolize_buffer is too small";
 
   // Use the heap to facilitate heap and buffer sanitizer tools.
   auto heap_buffer = absl::make_unique<char[]>(sizeof(try_symbolize_buffer));
   bool found = absl::Symbolize(pc, heap_buffer.get(), limit);
   if (found) {
-    ABSL_RAW_CHECK(strnlen(heap_buffer.get(), limit) < limit,
-                   "absl::Symbolize() did not properly terminate the string");
+    CHECK_LT(static_cast<int>(
+                 strnlen(heap_buffer.get(), static_cast<size_t>(limit))),
+             limit)
+        << "absl::Symbolize() did not properly terminate the string";
     strncpy(try_symbolize_buffer, heap_buffer.get(),
             sizeof(try_symbolize_buffer) - 1);
     try_symbolize_buffer[sizeof(try_symbolize_buffer) - 1] = '\0';
@@ -146,16 +158,29 @@
   return TrySymbolizeWithLimit(pc, sizeof(try_symbolize_buffer));
 }
 
-#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \
-    defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE)
+#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) ||    \
+    defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) || \
+    defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE)
+
+// Test with a return address.
+void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() {
+#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
+  void *return_address = __builtin_return_address(0);
+  const char *symbol = TrySymbolize(return_address);
+  CHECK_NE(symbol, nullptr) << "TestWithReturnAddress failed";
+  CHECK_STREQ(symbol, "main") << "TestWithReturnAddress failed";
+  std::cout << "TestWithReturnAddress passed" << std::endl;
+#endif
+}
 
 TEST(Symbolize, Cached) {
   // Compilers should give us pointers to them.
-  EXPECT_STREQ("nonstatic_func", TrySymbolize((void *)(&nonstatic_func)));
-
+  EXPECT_STREQ("nonstatic_func",
+               TrySymbolize(GetPCFromFnPtr((void *)(&nonstatic_func))));
   // The name of an internal linkage symbol is not specified; allow either a
   // mangled or an unmangled name here.
-  const char *static_func_symbol = TrySymbolize((void *)(&static_func));
+  const char *static_func_symbol =
+      TrySymbolize(GetPCFromFnPtr((void *)(&static_func)));
   EXPECT_TRUE(strcmp("static_func", static_func_symbol) == 0 ||
               strcmp("static_func()", static_func_symbol) == 0);
 
@@ -165,33 +190,50 @@
 TEST(Symbolize, Truncation) {
   constexpr char kNonStaticFunc[] = "nonstatic_func";
   EXPECT_STREQ("nonstatic_func",
-               TrySymbolizeWithLimit((void *)(&nonstatic_func),
+               TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
                                      strlen(kNonStaticFunc) + 1));
   EXPECT_STREQ("nonstatic_...",
-               TrySymbolizeWithLimit((void *)(&nonstatic_func),
+               TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
                                      strlen(kNonStaticFunc) + 0));
   EXPECT_STREQ("nonstatic...",
-               TrySymbolizeWithLimit((void *)(&nonstatic_func),
+               TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
                                      strlen(kNonStaticFunc) - 1));
-  EXPECT_STREQ("n...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 5));
-  EXPECT_STREQ("...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 4));
-  EXPECT_STREQ("..", TrySymbolizeWithLimit((void *)(&nonstatic_func), 3));
-  EXPECT_STREQ(".", TrySymbolizeWithLimit((void *)(&nonstatic_func), 2));
-  EXPECT_STREQ("", TrySymbolizeWithLimit((void *)(&nonstatic_func), 1));
-  EXPECT_EQ(nullptr, TrySymbolizeWithLimit((void *)(&nonstatic_func), 0));
+  EXPECT_STREQ("n...", TrySymbolizeWithLimit(
+                           GetPCFromFnPtr((void *)(&nonstatic_func)), 5));
+  EXPECT_STREQ("...", TrySymbolizeWithLimit(
+                          GetPCFromFnPtr((void *)(&nonstatic_func)), 4));
+  EXPECT_STREQ("..", TrySymbolizeWithLimit(
+                         GetPCFromFnPtr((void *)(&nonstatic_func)), 3));
+  EXPECT_STREQ(
+      ".", TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)), 2));
+  EXPECT_STREQ(
+      "", TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)), 1));
+  EXPECT_EQ(nullptr, TrySymbolizeWithLimit(
+                         GetPCFromFnPtr((void *)(&nonstatic_func)), 0));
 }
 
 TEST(Symbolize, SymbolizeWithDemangling) {
   Foo::func(100);
-  EXPECT_STREQ("Foo::func()", TrySymbolize((void *)(&Foo::func)));
+#ifdef __EMSCRIPTEN__
+  // Emscripten's online symbolizer is more precise with arguments.
+  EXPECT_STREQ("Foo::func(int)",
+               TrySymbolize(GetPCFromFnPtr((void *)(&Foo::func))));
+#else
+  EXPECT_STREQ("Foo::func()",
+               TrySymbolize(GetPCFromFnPtr((void *)(&Foo::func))));
+#endif
 }
 
 TEST(Symbolize, SymbolizeSplitTextSections) {
-  EXPECT_STREQ("unlikely_func()", TrySymbolize((void *)(&unlikely_func)));
-  EXPECT_STREQ("hot_func()", TrySymbolize((void *)(&hot_func)));
-  EXPECT_STREQ("startup_func()", TrySymbolize((void *)(&startup_func)));
-  EXPECT_STREQ("exit_func()", TrySymbolize((void *)(&exit_func)));
-  EXPECT_STREQ("regular_func()", TrySymbolize((void *)(&regular_func)));
+  EXPECT_STREQ("unlikely_func()",
+               TrySymbolize(GetPCFromFnPtr((void *)(&unlikely_func))));
+  EXPECT_STREQ("hot_func()", TrySymbolize(GetPCFromFnPtr((void *)(&hot_func))));
+  EXPECT_STREQ("startup_func()",
+               TrySymbolize(GetPCFromFnPtr((void *)(&startup_func))));
+  EXPECT_STREQ("exit_func()",
+               TrySymbolize(GetPCFromFnPtr((void *)(&exit_func))));
+  EXPECT_STREQ("regular_func()",
+               TrySymbolize(GetPCFromFnPtr((void *)(&regular_func))));
 }
 
 // Tests that verify that Symbolize stack footprint is within some limit.
@@ -261,15 +303,14 @@
 
 #endif  // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
 
-#ifndef ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE
+#if !defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) && \
+    !defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE)
 // Use a 64K page size for PPC.
 const size_t kPageSize = 64 << 10;
 // We place read-only symbols into the .text section and verify that we can
 // symbolize them and other symbols after remapping them.
-const char kPadding0[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) =
-    "";
-const char kPadding1[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) =
-    "";
+const char kPadding0[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) = "";
+const char kPadding1[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) = "";
 
 static int FilterElfHeader(struct dl_phdr_info *info, size_t size, void *data) {
   for (int i = 0; i < info->dlpi_phnum; i++) {
@@ -300,8 +341,8 @@
 TEST(Symbolize, SymbolizeWithMultipleMaps) {
   // Force kPadding0 and kPadding1 to be linked in.
   if (volatile_bool) {
-    ABSL_RAW_LOG(INFO, "%s", kPadding0);
-    ABSL_RAW_LOG(INFO, "%s", kPadding1);
+    LOG(INFO) << kPadding0;
+    LOG(INFO) << kPadding1;
   }
 
   // Verify we can symbolize everything.
@@ -378,12 +419,14 @@
                 DummySymbolDecorator, &c_message),
             0);
 
-  char *address = reinterpret_cast<char *>(1);
-  EXPECT_STREQ("abc", TrySymbolize(address++));
+  // Use addresses 4 and 8 here to ensure that we always use valid addresses
+  // even on systems that require instructions to be 32-bit aligned.
+  char *address = reinterpret_cast<char *>(4);
+  EXPECT_STREQ("abc", TrySymbolize(address));
 
   EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_b));
 
-  EXPECT_STREQ("ac", TrySymbolize(address++));
+  EXPECT_STREQ("ac", TrySymbolize(address + 4));
 
   // Cleanup: remove all remaining decorators so other stack traces don't
   // get mystery "ac" decoration.
@@ -417,16 +460,17 @@
 
   close(fd);
 }
-#endif  // !ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE
+#endif  // !ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE &&
+        // !ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
 
 // x86 specific tests.  Uses some inline assembler.
 extern "C" {
 inline void *ABSL_ATTRIBUTE_ALWAYS_INLINE inline_func() {
   void *pc = nullptr;
 #if defined(__i386__)
-  __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [ PC ] "=r"(pc));
+  __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [PC] "=r"(pc));
 #elif defined(__x86_64__)
-  __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [ PC ] "=r"(pc));
+  __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [PC] "=r"(pc));
 #endif
   return pc;
 }
@@ -434,9 +478,9 @@
 void *ABSL_ATTRIBUTE_NOINLINE non_inline_func() {
   void *pc = nullptr;
 #if defined(__i386__)
-  __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [ PC ] "=r"(pc));
+  __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [PC] "=r"(pc));
 #elif defined(__x86_64__)
-  __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [ PC ] "=r"(pc));
+  __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [PC] "=r"(pc));
 #endif
   return pc;
 }
@@ -446,9 +490,9 @@
     (defined(__i386__) || defined(__x86_64__))
   void *pc = non_inline_func();
   const char *symbol = TrySymbolize(pc);
-  ABSL_RAW_CHECK(symbol != nullptr, "TestWithPCInsideNonInlineFunction failed");
-  ABSL_RAW_CHECK(strcmp(symbol, "non_inline_func") == 0,
-                 "TestWithPCInsideNonInlineFunction failed");
+  CHECK_NE(symbol, nullptr) << "TestWithPCInsideNonInlineFunction failed";
+  CHECK_STREQ(symbol, "non_inline_func")
+      << "TestWithPCInsideNonInlineFunction failed";
   std::cout << "TestWithPCInsideNonInlineFunction passed" << std::endl;
 #endif
 }
@@ -458,25 +502,58 @@
     (defined(__i386__) || defined(__x86_64__))
   void *pc = inline_func();  // Must be inlined.
   const char *symbol = TrySymbolize(pc);
-  ABSL_RAW_CHECK(symbol != nullptr, "TestWithPCInsideInlineFunction failed");
-  ABSL_RAW_CHECK(strcmp(symbol, __FUNCTION__) == 0,
-                 "TestWithPCInsideInlineFunction failed");
+  CHECK_NE(symbol, nullptr) << "TestWithPCInsideInlineFunction failed";
+  CHECK_STREQ(symbol, __FUNCTION__) << "TestWithPCInsideInlineFunction failed";
   std::cout << "TestWithPCInsideInlineFunction passed" << std::endl;
 #endif
 }
 }
 
-// Test with a return address.
-void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() {
+#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
+    ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
+// Test that we correctly identify bounds of Thumb functions on ARM.
+//
+// Thumb functions have the lowest-order bit set in their addresses in the ELF
+// symbol table. This requires some extra logic to properly compute function
+// bounds. To test this logic, nudge a Thumb function right up against an ARM
+// function and try to symbolize the ARM function.
+//
+// A naive implementation will simply use the Thumb function's entry point as
+// written in the symbol table and will therefore treat the Thumb function as
+// extending one byte further in the instruction stream than it actually does.
+// When asked to symbolize the start of the ARM function, it will identify an
+// overlap between the Thumb and ARM functions, and it will return the name of
+// the Thumb function.
+//
+// A correct implementation, on the other hand, will null out the lowest-order
+// bit in the Thumb function's entry point. It will correctly compute the end of
+// the Thumb function, it will find no overlap between the Thumb and ARM
+// functions, and it will return the name of the ARM function.
+//
+// Unfortunately we cannot perform this test on armv6 or lower systems that use
+// the hard float ABI because gcc refuses to compile thumb functions on such
+// systems with a "sorry, unimplemented: Thumb-1 hard-float VFP ABI" error.
+
+__attribute__((target("thumb"))) int ArmThumbOverlapThumb(int x) {
+  return x * x * x;
+}
+
+__attribute__((target("arm"))) int ArmThumbOverlapArm(int x) {
+  return x * x * x;
+}
+
+void ABSL_ATTRIBUTE_NOINLINE TestArmThumbOverlap() {
 #if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
-  void *return_address = __builtin_return_address(0);
-  const char *symbol = TrySymbolize(return_address);
-  ABSL_RAW_CHECK(symbol != nullptr, "TestWithReturnAddress failed");
-  ABSL_RAW_CHECK(strcmp(symbol, "main") == 0, "TestWithReturnAddress failed");
-  std::cout << "TestWithReturnAddress passed" << std::endl;
+  const char *symbol = TrySymbolize((void *)&ArmThumbOverlapArm);
+  CHECK_NE(symbol, nullptr) << "TestArmThumbOverlap failed";
+  CHECK_STREQ("ArmThumbOverlapArm()", symbol) << "TestArmThumbOverlap failed";
+  std::cout << "TestArmThumbOverlap passed" << std::endl;
 #endif
 }
 
+#endif  // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && ((__ARM_ARCH >= 7)
+        // || !defined(__ARM_PCS_VFP))
+
 #elif defined(_WIN32)
 #if !defined(ABSL_CONSUME_DLL)
 
@@ -518,8 +595,7 @@
 }
 
 #endif  // !defined(ABSL_CONSUME_DLL)
-#else  // Symbolizer unimplemented
-
+#else   // Symbolizer unimplemented
 TEST(Symbolize, Unimplemented) {
   char buf[64];
   EXPECT_FALSE(absl::Symbolize((void *)(&nonstatic_func), buf, sizeof(buf)));
@@ -533,7 +609,7 @@
 #if !defined(__EMSCRIPTEN__)
   // Make sure kHpageTextPadding is linked into the binary.
   if (volatile_bool) {
-    ABSL_RAW_LOG(INFO, "%s", kHpageTextPadding);
+    LOG(INFO) << kHpageTextPadding;
   }
 #endif  // !defined(__EMSCRIPTEN__)
 
@@ -546,11 +622,16 @@
   absl::InitializeSymbolizer(argv[0]);
   testing::InitGoogleTest(&argc, argv);
 
-#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \
+#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) ||        \
+    defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE) || \
     defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE)
   TestWithPCInsideInlineFunction();
   TestWithPCInsideNonInlineFunction();
   TestWithReturnAddress();
+#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
+    ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
+  TestArmThumbOverlap();
+#endif
 #endif
 
   return RUN_ALL_TESTS();
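TestWithReturnAddress above relies on symbolizing the caller's return address rather than a function pointer. In isolation, the technique looks like this hedged sketch (CallerName is a hypothetical helper; __builtin_return_address is the same GCC/Clang builtin the test uses).

#include "absl/base/attributes.h"
#include "absl/debugging/symbolize.h"

// Hypothetical helper: returns the symbol name of whoever called it, or
// nullptr if symbolization fails. Must not be inlined, or level 0 would
// refer to the caller's caller instead.
ABSL_ATTRIBUTE_NOINLINE const char* CallerName(char* buf, int buf_size) {
  void* return_address = __builtin_return_address(0);
  return absl::Symbolize(return_address, buf, buf_size) ? buf : nullptr;
}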
diff --git a/abseil-cpp/absl/debugging/symbolize_win32.inc b/abseil-cpp/absl/debugging/symbolize_win32.inc
index c3df46f..53a099a 100644
--- a/abseil-cpp/absl/debugging/symbolize_win32.inc
+++ b/abseil-cpp/absl/debugging/symbolize_win32.inc
@@ -65,14 +65,15 @@
   if (!SymFromAddr(process, reinterpret_cast<DWORD64>(pc), nullptr, symbol)) {
     return false;
   }
-  strncpy(out, symbol->Name, out_size);
-  if (out[out_size - 1] != '\0') {
+  const size_t out_size_t = static_cast<size_t>(out_size);
+  strncpy(out, symbol->Name, out_size_t);
+  if (out[out_size_t - 1] != '\0') {
     // strncpy() does not '\0' terminate when it truncates.
     static constexpr char kEllipsis[] = "...";
-    int ellipsis_size =
-        std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
-    memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
-    out[out_size - 1] = '\0';
+    size_t ellipsis_size =
+        std::min(sizeof(kEllipsis) - 1, out_size_t - 1);
+    memcpy(out + out_size_t - ellipsis_size - 1, kEllipsis, ellipsis_size);
+    out[out_size_t - 1] = '\0';
   }
   return true;
 }
diff --git a/abseil-cpp/absl/flags/BUILD.bazel b/abseil-cpp/absl/flags/BUILD.bazel
index 62fb9a8..50bf387 100644
--- a/abseil-cpp/absl/flags/BUILD.bazel
+++ b/abseil-cpp/absl/flags/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -54,6 +53,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = [
         "//absl/flags:__pkg__",
+        "//absl/log:__pkg__",
     ],
     deps = [
         ":path_util",
@@ -99,8 +99,10 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:log_severity",
+        "//absl/numeric:int128",
         "//absl/strings",
         "//absl/strings:str_format",
+        "//absl/types:optional",
     ],
 )
 
@@ -114,7 +116,9 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
+    visibility = [
+        "//visibility:private",
+    ],
     deps = [
         "//absl/base:config",
         "//absl/base:fast_type_id",
@@ -192,6 +196,7 @@
     ],
     hdrs = [
         "internal/flag.h",
+        "internal/sequence_lock.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -205,6 +210,7 @@
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
         "//absl/memory",
         "//absl/meta:type_traits",
         "//absl/strings",
@@ -217,6 +223,7 @@
     name = "flag",
     srcs = [
         "flag.cc",
+        "internal/flag_msvc.inc",
     ],
     hdrs = [
         "declare.h",
@@ -259,6 +266,7 @@
         ":reflection",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/container:flat_hash_map",
         "//absl/strings",
     ],
 )
@@ -277,6 +285,7 @@
         ":usage_internal",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
         "//absl/strings",
         "//absl/synchronization",
     ],
@@ -302,6 +311,7 @@
         ":reflection",
         ":usage",
         ":usage_internal",
+        "//absl/algorithm:container",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/strings",
@@ -320,6 +330,13 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:android",
+        "no_test:os:ios",
+        "no_test_android",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
     deps = [
         ":commandlineflag",
         ":commandlineflag_internal",
@@ -356,6 +373,13 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:android",
+        "no_test:os:ios",
+        "no_test_android",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
     deps = [
         ":config",
         ":flag",
@@ -364,6 +388,7 @@
         ":reflection",
         "//absl/base:core_headers",
         "//absl/base:malloc_internal",
+        "//absl/numeric:int128",
         "//absl/strings",
         "//absl/time",
         "@com_google_googletest//:gtest_main",
@@ -377,11 +402,17 @@
         "flag_benchmark.cc",
     ],
     copts = ABSL_TEST_COPTS,
+    linkopts = select({
+        "//conditions:default": [],
+    }) + ABSL_DEFAULT_LINKOPTS,
     tags = ["benchmark"],
     visibility = ["//visibility:private"],
     deps = [
+        "flag_benchmark.lds",
         ":flag",
         ":marshalling",
+        ":parse",
+        ":reflection",
         "//absl/strings",
         "//absl/time",
         "//absl/types:optional",
@@ -411,12 +442,20 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:android",
+        "no_test:os:ios",
+        "no_test_android",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
     deps = [
         ":flag",
         ":parse",
         ":reflection",
-        "//absl/base:raw_logging_internal",
+        ":usage_internal",
         "//absl/base:scoped_set_env",
+        "//absl/log",
         "//absl/strings",
         "//absl/types:span",
         "@com_google_googletest//:gtest_main",
@@ -445,6 +484,7 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["no_test_wasm"],
     deps = [
         ":program_name",
         "//absl/strings",
@@ -460,6 +500,13 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:android",
+        "no_test:os:ios",
+        "no_test_android",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
     deps = [
         ":commandlineflag_internal",
         ":flag",
@@ -473,6 +520,26 @@
 )
 
 cc_test(
+    name = "sequence_lock_test",
+    size = "small",
+    timeout = "moderate",
+    srcs = [
+        "internal/sequence_lock_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    shard_count = 31,
+    tags = ["no_test_wasm"],
+    deps = [
+        ":flag_internal",
+        "//absl/base",
+        "//absl/container:fixed_array",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "usage_config_test",
     size = "small",
     srcs = [
@@ -497,6 +564,13 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:android",
+        "no_test:os:ios",
+        "no_test_android",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
     deps = [
         ":config",
         ":flag",
diff --git a/abseil-cpp/absl/flags/CMakeLists.txt b/abseil-cpp/absl/flags/CMakeLists.txt
index 8855191..b20463f 100644
--- a/abseil-cpp/absl/flags/CMakeLists.txt
+++ b/abseil-cpp/absl/flags/CMakeLists.txt
@@ -87,6 +87,8 @@
     absl::config
     absl::core_headers
     absl::log_severity
+    absl::int128
+    absl::optional
     absl::strings
     absl::str_format
 )
@@ -105,6 +107,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::config
+    absl::dynamic_annotations
     absl::fast_type_id
 )
 
@@ -176,6 +179,7 @@
     "internal/flag.cc"
   HDRS
     "internal/flag.h"
+    "internal/sequence_lock.h"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
@@ -201,6 +205,7 @@
   HDRS
     "declare.h"
     "flag.h"
+    "internal/flag_msvc.inc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
@@ -238,6 +243,7 @@
     absl::flags_private_handle_accessor
     absl::flags_program_name
     absl::flags_reflection
+    absl::flat_hash_map
     absl::strings
     absl::synchronization
 )
@@ -257,6 +263,7 @@
     absl::config
     absl::core_headers
     absl::flags_usage_internal
+    absl::raw_logging_internal
     absl::strings
     absl::synchronization
 )
@@ -274,6 +281,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::algorithm_container
     absl::config
     absl::core_headers
     absl::flags_config
@@ -290,7 +298,7 @@
 )
 
 ############################################################################
-# Unit tests in alpahabetical order.
+# Unit tests in alphabetical order.
 
 absl_cc_test(
   NAME
@@ -308,7 +316,7 @@
     absl::flags_reflection
     absl::memory
     absl::strings
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -320,7 +328,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::flags_config
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -338,9 +346,10 @@
     absl::flags_internal
     absl::flags_marshalling
     absl::flags_reflection
+    absl::int128
     absl::strings
     absl::time
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -352,7 +361,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::flags_marshalling
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -366,11 +375,12 @@
     absl::flags
     absl::flags_parse
     absl::flags_reflection
-    absl::raw_logging_internal
+    absl::flags_usage_internal
+    absl::log
     absl::scoped_set_env
     absl::span
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -382,7 +392,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::flags_path_util
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -395,7 +405,7 @@
   DEPS
     absl::flags_program_name
     absl::strings
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -412,7 +422,21 @@
     absl::flags_usage
     absl::memory
     absl::strings
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    flags_sequence_lock_test
+  SRCS
+    "internal/sequence_lock_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::flags_internal
+    absl::time
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -427,7 +451,7 @@
     absl::flags_path_util
     absl::flags_program_name
     absl::strings
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -446,5 +470,5 @@
     absl::flags_reflection
     absl::flags_usage
     absl::strings
-    gtest
+    GTest::gmock
 )
diff --git a/abseil-cpp/absl/flags/commandlineflag.h b/abseil-cpp/absl/flags/commandlineflag.h
index f2fa089..c30aa60 100644
--- a/abseil-cpp/absl/flags/commandlineflag.h
+++ b/abseil-cpp/absl/flags/commandlineflag.h
@@ -186,7 +186,7 @@
   // command line.
   virtual bool IsSpecifiedOnCommandLine() const = 0;
 
-  // Validates supplied value usign validator or parseflag routine
+  // Validates supplied value using validator or parseflag routine
   virtual bool ValidateInputValue(absl::string_view value) const = 0;
 
   // Checks that flags default value can be converted to string and back to the
diff --git a/abseil-cpp/absl/flags/config.h b/abseil-cpp/absl/flags/config.h
index 813a925..14c4235 100644
--- a/abseil-cpp/absl/flags/config.h
+++ b/abseil-cpp/absl/flags/config.h
@@ -45,25 +45,6 @@
 #define ABSL_FLAGS_STRIP_HELP ABSL_FLAGS_STRIP_NAMES
 #endif
 
-// ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD macro is used for using atomics with
-// double words, e.g. absl::Duration.
-// For reasons in bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878, modern
-// versions of GCC do not support cmpxchg16b instruction in standard atomics.
-#ifdef ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD
-#error "ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD should not be defined."
-#elif defined(__clang__) && defined(__x86_64__) && \
-    defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
-#define ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD 1
-#endif
-
-// ABSL_FLAGS_INTERNAL_HAS_RTTI macro is used for selecting if we can use RTTI
-// for flag type identification.
-#ifdef ABSL_FLAGS_INTERNAL_HAS_RTTI
-#error ABSL_FLAGS_INTERNAL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
-#define ABSL_FLAGS_INTERNAL_HAS_RTTI 1
-#endif  // !defined(__GNUC__) || defined(__GXX_RTTI)
-
 // These macros represent the "source of truth" for the list of supported
 // built-in types.
 #define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \
diff --git a/abseil-cpp/absl/flags/declare.h b/abseil-cpp/absl/flags/declare.h
index b9794d8..d1437bb 100644
--- a/abseil-cpp/absl/flags/declare.h
+++ b/abseil-cpp/absl/flags/declare.h
@@ -60,6 +60,14 @@
 // The ABSL_DECLARE_FLAG(type, name) macro expands to:
 //
 //   extern absl::Flag<type> FLAGS_name;
-#define ABSL_DECLARE_FLAG(type, name) extern ::absl::Flag<type> FLAGS_##name
+#define ABSL_DECLARE_FLAG(type, name) ABSL_DECLARE_FLAG_INTERNAL(type, name)
+
+// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its
+// arguments. Clients must use ABSL_DECLARE_FLAG instead.
+#define ABSL_DECLARE_FLAG_INTERNAL(type, name)               \
+  extern absl::Flag<type> FLAGS_##name;                      \
+  namespace absl /* block flags in namespaces */ {}          \
+  /* second redeclaration is to allow applying attributes */ \
+  extern absl::Flag<type> FLAGS_##name
 
 #endif  // ABSL_FLAGS_DECLARE_H_
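
A minimal sketch of what the new two-step expansion enables (the macro and flag
names below are hypothetical; the flag_test.cc changes further down exercise the
same pattern): because ABSL_DECLARE_FLAG now forwards through
ABSL_DECLARE_FLAG_INTERNAL, the name argument may itself be a macro that is
expanded before the FLAGS_ symbol is formed.

#include "absl/flags/declare.h"
#include "absl/flags/flag.h"

// Hypothetical name-generating macro.
#define MY_MODULE_FLAG(base) my_module_##base

// Declares and then defines FLAGS_my_module_verbose.
ABSL_DECLARE_FLAG(bool, MY_MODULE_FLAG(verbose));
ABSL_FLAG(bool, MY_MODULE_FLAG(verbose), false, "Enable verbose output");

// Read it with the fully expanded name: absl::GetFlag(FLAGS_my_module_verbose).
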
diff --git a/abseil-cpp/absl/flags/flag.h b/abseil-cpp/absl/flags/flag.h
index a9cb2b7..b7f94be 100644
--- a/abseil-cpp/absl/flags/flag.h
+++ b/abseil-cpp/absl/flags/flag.h
@@ -67,105 +67,15 @@
 //      ABSL_FLAG(int, count, 0, "Count of items to process");
 //
 // No public methods of `absl::Flag<T>` are part of the Abseil Flags API.
+//
+// For type support of Abseil Flags, see the marshalling.h header file, which
+// discusses supported standard types, optional flags, and additional Abseil
+// type support.
 #if !defined(_MSC_VER) || defined(__clang__)
 template <typename T>
 using Flag = flags_internal::Flag<T>;
 #else
-// MSVC debug builds do not implement initialization with constexpr constructors
-// correctly. To work around this we add a level of indirection, so that the
-// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias
-// to that class) and dynamically allocates an instance when necessary. We also
-// forward all calls to internal::Flag methods via trampoline methods. In this
-// setup the `absl::Flag` class does not have constructor and virtual methods,
-// all the data members are public and thus MSVC is able to initialize it at
-// link time. To deal with multiple threads accessing the flag for the first
-// time concurrently we use an atomic boolean indicating if flag object is
-// initialized. We also employ the double-checked locking pattern where the
-// second level of protection is a global Mutex, so if two threads attempt to
-// construct the flag concurrently only one wins.
-// This solution is based on a recomendation here:
-// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454
-
-namespace flags_internal {
-absl::Mutex* GetGlobalConstructionGuard();
-}  // namespace flags_internal
-
-template <typename T>
-class Flag {
- public:
-  // No constructor and destructor to ensure this is an aggregate type.
-  // Visual Studio 2015 still requires the constructor for class to be
-  // constexpr initializable.
-#if _MSC_VER <= 1900
-  constexpr Flag(const char* name, const char* filename,
-                 const flags_internal::HelpGenFunc help_gen,
-                 const flags_internal::FlagDfltGenFunc default_value_gen)
-      : name_(name),
-        filename_(filename),
-        help_gen_(help_gen),
-        default_value_gen_(default_value_gen),
-        inited_(false),
-        impl_(nullptr) {}
-#endif
-
-  flags_internal::Flag<T>& GetImpl() const {
-    if (!inited_.load(std::memory_order_acquire)) {
-      absl::MutexLock l(flags_internal::GetGlobalConstructionGuard());
-
-      if (inited_.load(std::memory_order_acquire)) {
-        return *impl_;
-      }
-
-      impl_ = new flags_internal::Flag<T>(
-          name_, filename_,
-          {flags_internal::FlagHelpMsg(help_gen_),
-           flags_internal::FlagHelpKind::kGenFunc},
-          {flags_internal::FlagDefaultSrc(default_value_gen_),
-           flags_internal::FlagDefaultKind::kGenFunc});
-      inited_.store(true, std::memory_order_release);
-    }
-
-    return *impl_;
-  }
-
-  // Public methods of `absl::Flag<T>` are NOT part of the Abseil Flags API.
-  // See https://abseil.io/docs/cpp/guides/flags
-  bool IsRetired() const { return GetImpl().IsRetired(); }
-  absl::string_view Name() const { return GetImpl().Name(); }
-  std::string Help() const { return GetImpl().Help(); }
-  bool IsModified() const { return GetImpl().IsModified(); }
-  bool IsSpecifiedOnCommandLine() const {
-    return GetImpl().IsSpecifiedOnCommandLine();
-  }
-  std::string Filename() const { return GetImpl().Filename(); }
-  std::string DefaultValue() const { return GetImpl().DefaultValue(); }
-  std::string CurrentValue() const { return GetImpl().CurrentValue(); }
-  template <typename U>
-  inline bool IsOfType() const {
-    return GetImpl().template IsOfType<U>();
-  }
-  T Get() const {
-    return flags_internal::FlagImplPeer::InvokeGet<T>(GetImpl());
-  }
-  void Set(const T& v) {
-    flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v);
-  }
-  void InvokeCallback() { GetImpl().InvokeCallback(); }
-
-  const CommandLineFlag& Reflect() const {
-    return flags_internal::FlagImplPeer::InvokeReflect(GetImpl());
-  }
-
-  // The data members are logically private, but they need to be public for
-  // this to be an aggregate type.
-  const char* name_;
-  const char* filename_;
-  const flags_internal::HelpGenFunc help_gen_;
-  const flags_internal::FlagDfltGenFunc default_value_gen_;
-
-  mutable std::atomic<bool> inited_;
-  mutable flags_internal::Flag<T>* impl_;
-};
+#include "absl/flags/internal/flag_msvc.inc"
 #endif
 
 // GetFlag()
@@ -265,6 +175,8 @@
 //
 //   ABSL_FLAG(T, name, default_value, help).OnUpdate(callback);
 //
+// `callback` should be convertible to `void (*)()`.
+//
 // After any setting of the flag value, the callback will be called at least
 // once. A rapid sequence of changes may be merged together into the same
 // callback. No concurrent calls to the callback will be made for the same
@@ -279,7 +191,6 @@
 // Note: ABSL_FLAG.OnUpdate() does not have a public definition. Hence, this
 // comment serves as its API documentation.
 
-
 // -----------------------------------------------------------------------------
 // Implementation details below this section
 // -----------------------------------------------------------------------------
@@ -301,13 +212,15 @@
 #if ABSL_FLAGS_STRIP_NAMES
 #define ABSL_FLAG_IMPL_FLAGNAME(txt) ""
 #define ABSL_FLAG_IMPL_FILENAME() ""
-#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
-  absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag))
+#define ABSL_FLAG_IMPL_REGISTRAR(T, flag)                                      \
+  absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
+                                                nullptr)
 #else
 #define ABSL_FLAG_IMPL_FLAGNAME(txt) txt
 #define ABSL_FLAG_IMPL_FILENAME() __FILE__
-#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
-  absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag))
+#define ABSL_FLAG_IMPL_REGISTRAR(T, flag)                                     \
+  absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
+                                               __FILE__)
 #endif
 
 // ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP
@@ -332,8 +245,8 @@
     /* default value argument. That keeps temporaries alive */               \
     /* long enough for NonConst to work correctly.          */               \
     static constexpr absl::string_view Value(                                \
-        absl::string_view v = ABSL_FLAG_IMPL_FLAGHELP(txt)) {                \
-      return v;                                                              \
+        absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt)) {   \
+      return absl_flag_help;                                                 \
     }                                                                        \
     static std::string NonConst() { return std::string(Value()); }           \
   };                                                                         \
@@ -345,8 +258,8 @@
 #define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value)     \
   struct AbslFlagDefaultGenFor##name {                                        \
     Type value = absl::flags_internal::InitDefaultValue<Type>(default_value); \
-    static void Gen(void* p) {                                                \
-      new (p) Type(AbslFlagDefaultGenFor##name{}.value);                      \
+    static void Gen(void* absl_flag_default_loc) {                            \
+      new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value);  \
     }                                                                         \
   };
 
@@ -356,6 +269,7 @@
 // global name for FLAGS_no<flag_name> symbol, thus preventing the possibility
 // of defining two flags with names foo and nofoo.
 #define ABSL_FLAG_IMPL(Type, name, default_value, help)                       \
+  extern ::absl::Flag<Type> FLAGS_##name;                                     \
   namespace absl /* block flags in namespaces */ {}                           \
   ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value)           \
   ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help)                             \
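
The hunk above also records that the callback passed to OnUpdate() must be
convertible to `void (*)()`. A minimal sketch of the documented pattern, using a
hypothetical flag (OnUpdate() has no public definition, so the comment in flag.h
is its only API documentation):

#include "absl/flags/flag.h"

// A capture-less lambda converts to `void (*)()` as required; it runs at least
// once after every change to --poll_interval_ms.
ABSL_FLAG(int, poll_interval_ms, 100, "Polling interval in milliseconds")
    .OnUpdate([] { /* refresh any cached copy of the interval here */ });
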
diff --git a/abseil-cpp/absl/flags/flag_benchmark.cc b/abseil-cpp/absl/flags/flag_benchmark.cc
index 7b52c9b..758a6a5 100644
--- a/abseil-cpp/absl/flags/flag_benchmark.cc
+++ b/abseil-cpp/absl/flags/flag_benchmark.cc
@@ -20,6 +20,8 @@
 
 #include "absl/flags/flag.h"
 #include "absl/flags/marshalling.h"
+#include "absl/flags/parse.h"
+#include "absl/flags/reflection.h"
 #include "absl/strings/string_view.h"
 #include "absl/time/time.h"
 #include "absl/types/optional.h"
@@ -99,28 +101,151 @@
   A(AbslDuration)            \
   A(UDT)
 
-#define FLAG_DEF(T) ABSL_FLAG(T, T##_flag, {}, "");
+#define REPLICATE_0(A, T, name, index) A(T, name, index)
+#define REPLICATE_1(A, T, name, index) \
+  REPLICATE_0(A, T, name, index##0) REPLICATE_0(A, T, name, index##1)
+#define REPLICATE_2(A, T, name, index) \
+  REPLICATE_1(A, T, name, index##0) REPLICATE_1(A, T, name, index##1)
+#define REPLICATE_3(A, T, name, index) \
+  REPLICATE_2(A, T, name, index##0) REPLICATE_2(A, T, name, index##1)
+#define REPLICATE_4(A, T, name, index) \
+  REPLICATE_3(A, T, name, index##0) REPLICATE_3(A, T, name, index##1)
+#define REPLICATE_5(A, T, name, index) \
+  REPLICATE_4(A, T, name, index##0) REPLICATE_4(A, T, name, index##1)
+#define REPLICATE_6(A, T, name, index) \
+  REPLICATE_5(A, T, name, index##0) REPLICATE_5(A, T, name, index##1)
+#define REPLICATE_7(A, T, name, index) \
+  REPLICATE_6(A, T, name, index##0) REPLICATE_6(A, T, name, index##1)
+#define REPLICATE_8(A, T, name, index) \
+  REPLICATE_7(A, T, name, index##0) REPLICATE_7(A, T, name, index##1)
+#define REPLICATE_9(A, T, name, index) \
+  REPLICATE_8(A, T, name, index##0) REPLICATE_8(A, T, name, index##1)
+#if defined(_MSC_VER)
+#define REPLICATE(A, T, name) \
+  REPLICATE_7(A, T, name, 0) REPLICATE_7(A, T, name, 1)
+#define SINGLE_FLAG(T) FLAGS_##T##_flag_00000000
+#else
+#define REPLICATE(A, T, name) \
+  REPLICATE_9(A, T, name, 0) REPLICATE_9(A, T, name, 1)
+#define SINGLE_FLAG(T) FLAGS_##T##_flag_0000000000
+#endif
+#define REPLICATE_ALL(A, T, name) \
+  REPLICATE_9(A, T, name, 0) REPLICATE_9(A, T, name, 1)
 
+#define COUNT(T, name, index) +1
+constexpr size_t kNumFlags = 0 REPLICATE(COUNT, _, _);
+
+#if defined(__clang__) && defined(__linux__)
+// Force the flags used for benchmarks into a separate ELF section.
+// This ensures that, even when other parts of the code might change size,
+// the layout of the flags across cachelines is kept constant. This makes
+// benchmark results more reproducible across unrelated code changes.
+#pragma clang section data = ".benchmark_flags"
+#endif
+#define DEFINE_FLAG(T, name, index) ABSL_FLAG(T, name##_##index, {}, "");
+#define FLAG_DEF(T) REPLICATE(DEFINE_FLAG, T, T##_flag);
 BENCHMARKED_TYPES(FLAG_DEF)
+#if defined(__clang__) && defined(__linux__)
+#pragma clang section data = ""
+#endif
+// Register thousands of flags to bloat up the size of the registry.
+// This mimics real-life production binaries.
+#define BLOAT_FLAG(_unused1, _unused2, index) \
+  ABSL_FLAG(int, bloat_flag_##index, 0, "");
+REPLICATE_ALL(BLOAT_FLAG, _, _)
 
 namespace {
 
-#define BM_GetFlag(T)                                            \
-  void BM_GetFlag_##T(benchmark::State& state) {                 \
-    for (auto _ : state) {                                       \
-      benchmark::DoNotOptimize(absl::GetFlag(FLAGS_##T##_flag)); \
-    }                                                            \
-  }                                                              \
-  BENCHMARK(BM_GetFlag_##T);
+#define FLAG_PTR(T, name, index) &FLAGS_##name##_##index,
+#define FLAG_PTR_ARR(T)                              \
+  static constexpr absl::Flag<T>* FlagPtrs_##T[] = { \
+      REPLICATE(FLAG_PTR, T, T##_flag)};
+BENCHMARKED_TYPES(FLAG_PTR_ARR)
 
-BENCHMARKED_TYPES(BM_GetFlag)
+#define BM_SingleGetFlag(T)                                    \
+  void BM_SingleGetFlag_##T(benchmark::State& state) {         \
+    for (auto _ : state) {                                     \
+      benchmark::DoNotOptimize(absl::GetFlag(SINGLE_FLAG(T))); \
+    }                                                          \
+  }                                                            \
+  BENCHMARK(BM_SingleGetFlag_##T)->ThreadRange(1, 16);
+
+BENCHMARKED_TYPES(BM_SingleGetFlag)
+
+template <typename T>
+struct Accumulator {
+  using type = T;
+};
+template <>
+struct Accumulator<String> {
+  using type = size_t;
+};
+template <>
+struct Accumulator<VectorOfStrings> {
+  using type = size_t;
+};
+template <>
+struct Accumulator<OptionalInt> {
+  using type = bool;
+};
+template <>
+struct Accumulator<OptionalString> {
+  using type = bool;
+};
+template <>
+struct Accumulator<UDT> {
+  using type = bool;
+};
+
+template <typename T>
+void Accumulate(typename Accumulator<T>::type& a, const T& f) {
+  a += f;
+}
+void Accumulate(bool& a, bool f) { a = a || f; }
+void Accumulate(size_t& a, const std::string& f) { a += f.size(); }
+void Accumulate(size_t& a, const std::vector<std::string>& f) { a += f.size(); }
+void Accumulate(bool& a, const OptionalInt& f) { a |= f.has_value(); }
+void Accumulate(bool& a, const OptionalString& f) { a |= f.has_value(); }
+void Accumulate(bool& a, const UDT& f) {
+  a |= reinterpret_cast<int64_t>(&f) & 0x1;
+}
+
+#define BM_ManyGetFlag(T)                            \
+  void BM_ManyGetFlag_##T(benchmark::State& state) { \
+    Accumulator<T>::type res = {};                   \
+    while (state.KeepRunningBatch(kNumFlags)) {      \
+      for (auto* flag_ptr : FlagPtrs_##T) {          \
+        Accumulate(res, absl::GetFlag(*flag_ptr));   \
+      }                                              \
+    }                                                \
+    benchmark::DoNotOptimize(res);                   \
+  }                                                  \
+  BENCHMARK(BM_ManyGetFlag_##T)->ThreadRange(1, 8);
+
+BENCHMARKED_TYPES(BM_ManyGetFlag)
+
+void BM_ThreadedFindCommandLineFlag(benchmark::State& state) {
+  char dummy[] = "dummy";
+  char* argv[] = {dummy};
+  // We need to ensure that flags have been parsed. That is where the registry
+  // is finalized.
+  absl::ParseCommandLine(1, argv);
+
+  while (state.KeepRunningBatch(kNumFlags)) {
+    for (auto* flag_ptr : FlagPtrs_bool) {
+      benchmark::DoNotOptimize(absl::FindCommandLineFlag(flag_ptr->Name()));
+    }
+  }
+}
+BENCHMARK(BM_ThreadedFindCommandLineFlag)->ThreadRange(1, 16);
 
 }  // namespace
 
-#define InvokeGetFlag(T)                                               \
-  T AbslInvokeGetFlag##T() { return absl::GetFlag(FLAGS_##T##_flag); } \
+#ifdef __llvm__
+// To view disassembly use: gdb ${BINARY}  -batch -ex "disassemble /s $FUNC"
+#define InvokeGetFlag(T)                                             \
+  T AbslInvokeGetFlag##T() { return absl::GetFlag(SINGLE_FLAG(T)); } \
   int odr##T = (benchmark::DoNotOptimize(AbslInvokeGetFlag##T), 1);
 
 BENCHMARKED_TYPES(InvokeGetFlag)
-
-// To veiw disassembly use: gdb ${BINARY}  -batch -ex "disassemble /s $FUNC"
+#endif  // __llvm__
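
To make the replication macros above easier to follow, here is a small worked
expansion (illustrative only; the real code uses REPLICATE_9 rather than
REPLICATE_2). Each level doubles the set and appends one digit to the index:

// REPLICATE_2(DEFINE_FLAG, bool, bool_flag, 0) expands to
//   ABSL_FLAG(bool, bool_flag_000, {}, "");
//   ABSL_FLAG(bool, bool_flag_001, {}, "");
//   ABSL_FLAG(bool, bool_flag_010, {}, "");
//   ABSL_FLAG(bool, bool_flag_011, {}, "");
// REPLICATE therefore defines 2 * 2^9 = 1024 flags per benchmarked type
// (2 * 2^7 = 256 on MSVC), with 10-digit indices (8 on MSVC), which matches
// SINGLE_FLAG(T); kNumFlags counts the same total via the COUNT macro.
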
diff --git a/abseil-cpp/absl/flags/flag_benchmark.lds b/abseil-cpp/absl/flags/flag_benchmark.lds
new file mode 100644
index 0000000..af115df
--- /dev/null
+++ b/abseil-cpp/absl/flags/flag_benchmark.lds
@@ -0,0 +1,13 @@
+/* This linker script forces the flags used by flags_benchmark
+ * into a separate page-aligned section. This isn't necessary for
+ * correctness but ensures that the benchmark results are more
+ * reproducible across unrelated code changes.
+ */
+SECTIONS {
+  .benchmark_flags : {
+    . = ALIGN(0x1000);
+    * (.benchmark_flags);
+  }
+}
+
+INSERT AFTER .data
diff --git a/abseil-cpp/absl/flags/flag_test.cc b/abseil-cpp/absl/flags/flag_test.cc
index 654c812..f9cda02 100644
--- a/abseil-cpp/absl/flags/flag_test.cc
+++ b/abseil-cpp/absl/flags/flag_test.cc
@@ -18,6 +18,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <atomic>
 #include <cmath>
 #include <new>
 #include <string>
@@ -26,12 +27,14 @@
 
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
+#include "absl/base/macros.h"
 #include "absl/flags/config.h"
 #include "absl/flags/declare.h"
 #include "absl/flags/internal/flag.h"
 #include "absl/flags/marshalling.h"
 #include "absl/flags/reflection.h"
 #include "absl/flags/usage_config.h"
+#include "absl/numeric/int128.h"
 #include "absl/strings/match.h"
 #include "absl/strings/numbers.h"
 #include "absl/strings/str_cat.h"
@@ -59,6 +62,7 @@
 struct UDT {
   UDT() = default;
   UDT(const UDT&) = default;
+  UDT& operator=(const UDT&) = default;
 };
 bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
 std::string AbslUnparseFlag(const UDT&) { return ""; }
@@ -100,30 +104,35 @@
 
 TEST_F(FlagTest, Traits) {
   EXPECT_EQ(flags::StorageKind<int>(),
-            flags::FlagValueStorageKind::kOneWordAtomic);
+            flags::FlagValueStorageKind::kValueAndInitBit);
   EXPECT_EQ(flags::StorageKind<bool>(),
-            flags::FlagValueStorageKind::kOneWordAtomic);
+            flags::FlagValueStorageKind::kValueAndInitBit);
   EXPECT_EQ(flags::StorageKind<double>(),
             flags::FlagValueStorageKind::kOneWordAtomic);
   EXPECT_EQ(flags::StorageKind<int64_t>(),
             flags::FlagValueStorageKind::kOneWordAtomic);
 
-#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD)
   EXPECT_EQ(flags::StorageKind<S1>(),
-            flags::FlagValueStorageKind::kTwoWordsAtomic);
+            flags::FlagValueStorageKind::kSequenceLocked);
   EXPECT_EQ(flags::StorageKind<S2>(),
-            flags::FlagValueStorageKind::kTwoWordsAtomic);
-#else
-  EXPECT_EQ(flags::StorageKind<S1>(),
-            flags::FlagValueStorageKind::kAlignedBuffer);
-  EXPECT_EQ(flags::StorageKind<S2>(),
-            flags::FlagValueStorageKind::kAlignedBuffer);
+            flags::FlagValueStorageKind::kSequenceLocked);
+// Make sure absl::Duration uses the sequence-locked code path. MSVC 2015
+// doesn't consider absl::Duration to be trivially copyable, so we restrict
+// this check to clang, which seems to be a well-behaved compiler here.
+#ifdef __clang__
+  EXPECT_EQ(flags::StorageKind<absl::Duration>(),
+            flags::FlagValueStorageKind::kSequenceLocked);
 #endif
 
   EXPECT_EQ(flags::StorageKind<std::string>(),
             flags::FlagValueStorageKind::kAlignedBuffer);
   EXPECT_EQ(flags::StorageKind<std::vector<std::string>>(),
             flags::FlagValueStorageKind::kAlignedBuffer);
+
+  EXPECT_EQ(flags::StorageKind<absl::int128>(),
+            flags::FlagValueStorageKind::kSequenceLocked);
+  EXPECT_EQ(flags::StorageKind<absl::uint128>(),
+            flags::FlagValueStorageKind::kSequenceLocked);
 }
 
 // --------------------------------------------------------------------
@@ -132,6 +141,8 @@
                                       flags::FlagHelpKind::kLiteral};
 
 using String = std::string;
+using int128 = absl::int128;
+using uint128 = absl::uint128;
 
 #if !defined(_MSC_VER) || defined(__clang__)
 #define DEFINE_CONSTRUCTED_FLAG(T, dflt, dflt_kind)                        \
@@ -168,6 +179,8 @@
 DEFINE_CONSTRUCTED_FLAG(double, 9.10, kOneWord);
 DEFINE_CONSTRUCTED_FLAG(String, &TestMakeDflt<String>, kGenFunc);
 DEFINE_CONSTRUCTED_FLAG(UDT, &TestMakeDflt<UDT>, kGenFunc);
+DEFINE_CONSTRUCTED_FLAG(int128, 13, kGenFunc);
+DEFINE_CONSTRUCTED_FLAG(uint128, 14, kGenFunc);
 
 template <typename T>
 bool TestConstructionFor(const absl::Flag<T>& f1, absl::Flag<T>& f2) {
@@ -175,7 +188,7 @@
   EXPECT_EQ(absl::GetFlagReflectionHandle(f1).Help(), "literal help");
   EXPECT_EQ(absl::GetFlagReflectionHandle(f1).Filename(), "file");
 
-  flags::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(f2))
+  flags::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(f2), nullptr)
       .OnUpdate(TestCallback);
 
   EXPECT_EQ(absl::GetFlagReflectionHandle(f2).Name(), "f2");
@@ -199,6 +212,8 @@
   TEST_CONSTRUCTED_FLAG(double);
   TEST_CONSTRUCTED_FLAG(String);
   TEST_CONSTRUCTED_FLAG(UDT);
+  TEST_CONSTRUCTED_FLAG(int128);
+  TEST_CONSTRUCTED_FLAG(uint128);
 }
 
 // --------------------------------------------------------------------
@@ -217,6 +232,8 @@
 ABSL_DECLARE_FLAG(float, test_flag_10);
 ABSL_DECLARE_FLAG(std::string, test_flag_11);
 ABSL_DECLARE_FLAG(absl::Duration, test_flag_12);
+ABSL_DECLARE_FLAG(absl::int128, test_flag_13);
+ABSL_DECLARE_FLAG(absl::uint128, test_flag_14);
 
 namespace {
 
@@ -248,6 +265,10 @@
             "test_flag_11");
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_12).Name(),
             "test_flag_12");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Name(),
+            "test_flag_13");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Name(),
+            "test_flag_14");
 }
 #endif  // !ABSL_FLAGS_STRIP_NAMES
 
@@ -267,6 +288,9 @@
 ABSL_FLAG(float, test_flag_10, 1.234e12f, "test flag 10");
 ABSL_FLAG(std::string, test_flag_11, "", "test flag 11");
 ABSL_FLAG(absl::Duration, test_flag_12, absl::Minutes(10), "test flag 12");
+ABSL_FLAG(absl::int128, test_flag_13, absl::MakeInt128(-1, 0), "test flag 13");
+ABSL_FLAG(absl::uint128, test_flag_14, absl::MakeUint128(0, 0xFFFAAABBBCCCDDD),
+          "test flag 14");
 
 namespace {
 
@@ -381,6 +405,24 @@
       absl::GetFlagReflectionHandle(FLAGS_test_flag_12).Filename(),
       expected_file_name))
       << absl::GetFlagReflectionHandle(FLAGS_test_flag_12).Filename();
+
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Name(),
+            "test_flag_13");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Help(),
+            "test flag 13");
+  EXPECT_TRUE(absl::EndsWith(
+      absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Filename(),
+      expected_file_name))
+      << absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Filename();
+
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Name(),
+            "test_flag_14");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Help(),
+            "test flag 14");
+  EXPECT_TRUE(absl::EndsWith(
+      absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Filename(),
+      expected_file_name))
+      << absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Filename();
 }
 #endif  // !ABSL_FLAGS_STRIP_NAMES
 
@@ -411,6 +453,10 @@
             "");
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_12).DefaultValue(),
             "10m");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).DefaultValue(),
+            "-18446744073709551616");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).DefaultValue(),
+            "1152827684197027293");
 
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_01).CurrentValue(),
             "true");
@@ -436,6 +482,10 @@
             "");
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_12).CurrentValue(),
             "10m");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).CurrentValue(),
+            "-18446744073709551616");
+  EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).CurrentValue(),
+            "1152827684197027293");
 
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), true);
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_02), 1234);
@@ -449,6 +499,9 @@
   EXPECT_NEAR(absl::GetFlag(FLAGS_test_flag_10), 1.234e12f, 1e5f);
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_11), "");
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_12), absl::Minutes(10));
+  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_13), absl::MakeInt128(-1, 0));
+  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_14),
+            absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
 }
 
 // --------------------------------------------------------------------
@@ -550,6 +603,13 @@
 
   absl::SetFlag(&FLAGS_test_flag_12, absl::Seconds(110));
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_12), absl::Seconds(110));
+
+  absl::SetFlag(&FLAGS_test_flag_13, absl::MakeInt128(-1, 0));
+  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_13), absl::MakeInt128(-1, 0));
+
+  absl::SetFlag(&FLAGS_test_flag_14, absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
+  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_14),
+            absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
 }
 
 // --------------------------------------------------------------------
@@ -579,6 +639,48 @@
   EXPECT_EQ(*handle->TryGet<std::string>(), "");
   handle = absl::FindCommandLineFlag("test_flag_12");
   EXPECT_EQ(*handle->TryGet<absl::Duration>(), absl::Minutes(10));
+  handle = absl::FindCommandLineFlag("test_flag_13");
+  EXPECT_EQ(*handle->TryGet<absl::int128>(), absl::MakeInt128(-1, 0));
+  handle = absl::FindCommandLineFlag("test_flag_14");
+  EXPECT_EQ(*handle->TryGet<absl::uint128>(),
+            absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, ConcurrentSetAndGet) {
+  static constexpr int kNumThreads = 8;
+  // Two arbitrary durations. One thread will concurrently flip the flag
+  // between these two values, while the other threads read it and verify
+  // that no other value is seen.
+  static const absl::Duration kValidDurations[] = {
+      absl::Seconds(int64_t{0x6cebf47a9b68c802}) + absl::Nanoseconds(229702057),
+      absl::Seconds(int64_t{0x23fec0307e4e9d3}) + absl::Nanoseconds(44555374)};
+  absl::SetFlag(&FLAGS_test_flag_12, kValidDurations[0]);
+
+  std::atomic<bool> stop{false};
+  std::vector<std::thread> threads;
+  auto* handle = absl::FindCommandLineFlag("test_flag_12");
+  for (int i = 0; i < kNumThreads; i++) {
+    threads.emplace_back([&]() {
+      while (!stop.load(std::memory_order_relaxed)) {
+        // Try loading the flag both directly and via a reflection
+        // handle.
+        absl::Duration v = absl::GetFlag(FLAGS_test_flag_12);
+        EXPECT_TRUE(v == kValidDurations[0] || v == kValidDurations[1]);
+        v = *handle->TryGet<absl::Duration>();
+        EXPECT_TRUE(v == kValidDurations[0] || v == kValidDurations[1]);
+      }
+    });
+  }
+  absl::Time end_time = absl::Now() + absl::Seconds(1);
+  int i = 0;
+  while (absl::Now() < end_time) {
+    absl::SetFlag(&FLAGS_test_flag_12,
+                  kValidDurations[i++ % ABSL_ARRAYSIZE(kValidDurations)]);
+  }
+  stop.store(true, std::memory_order_relaxed);
+  for (auto& t : threads) t.join();
 }
 
 // --------------------------------------------------------------------
@@ -684,6 +786,8 @@
 namespace {
 
 TEST_F(FlagTest, TestCustomUDT) {
+  EXPECT_EQ(flags::StorageKind<CustomUDT>(),
+            flags::FlagValueStorageKind::kOneWordAtomic);
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_custom_udt), CustomUDT(1, 1));
   absl::SetFlag(&FLAGS_test_flag_custom_udt, CustomUDT(2, 3));
   EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_custom_udt), CustomUDT(2, 3));
@@ -812,7 +916,9 @@
 ABSL_RETIRED_FLAG(int, old_int_flag, (int)std::sqrt(10), "old descr");
 ABSL_RETIRED_FLAG(std::string, old_str_flag, "", absl::StrCat("old ", "descr"));
 
-bool initializaion_order_fiasco_test = [] {
+namespace {
+
+bool initialization_order_fiasco_test ABSL_ATTRIBUTE_UNUSED = [] {
   // Iterate over all the flags during static initialization.
   // This should not trigger ASan's initialization-order-fiasco.
   auto* handle1 = absl::FindCommandLineFlag("flag_on_separate_file");
@@ -823,8 +929,6 @@
   return true;
 }();
 
-namespace {
-
 TEST_F(FlagTest, TestRetiredFlagRegistration) {
   auto* handle = absl::FindCommandLineFlag("old_bool_flag");
   EXPECT_TRUE(handle->IsOfType<bool>());
@@ -904,3 +1008,221 @@
 }
 
 }  // namespace
+
+// --------------------------------------------------------------------
+
+namespace {
+
+enum TestE { A = 1, B = 2, C = 3 };
+
+struct EnumWrapper {
+  EnumWrapper() : e(A) {}
+
+  TestE e;
+};
+
+bool AbslParseFlag(absl::string_view, EnumWrapper*, std::string*) {
+  return true;
+}
+std::string AbslUnparseFlag(const EnumWrapper&) { return ""; }
+
+}  // namespace
+
+ABSL_FLAG(EnumWrapper, test_enum_wrapper_flag, {}, "help");
+
+TEST_F(FlagTest, TesTypeWrappingEnum) {
+  EnumWrapper value = absl::GetFlag(FLAGS_test_enum_wrapper_flag);
+  EXPECT_EQ(value.e, A);
+
+  value.e = B;
+  absl::SetFlag(&FLAGS_test_enum_wrapper_flag, value);
+  value = absl::GetFlag(FLAGS_test_enum_wrapper_flag);
+  EXPECT_EQ(value.e, B);
+}
+
+// This is a compile test to ensure macros are expanded within ABSL_FLAG and
+// ABSL_DECLARE_FLAG.
+#define FLAG_NAME_MACRO(name) prefix_##name
+ABSL_DECLARE_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag));
+ABSL_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag), 0,
+          "Testing macro expansion within ABSL_FLAG");
+
+TEST_F(FlagTest, MacroWithinAbslFlag) {
+  EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 0);
+  absl::SetFlag(&FLAGS_prefix_test_macro_named_flag, 1);
+  EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 1);
+}
+
+// --------------------------------------------------------------------
+
+#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 5
+#define ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
+#endif
+
+#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
+ABSL_FLAG(absl::optional<bool>, optional_bool, absl::nullopt, "help");
+#endif
+ABSL_FLAG(absl::optional<int>, optional_int, {}, "help");
+ABSL_FLAG(absl::optional<double>, optional_double, 9.3, "help");
+ABSL_FLAG(absl::optional<std::string>, optional_string, absl::nullopt, "help");
+ABSL_FLAG(absl::optional<absl::Duration>, optional_duration, absl::nullopt,
+          "help");
+ABSL_FLAG(absl::optional<absl::optional<int>>, optional_optional_int,
+          absl::nullopt, "help");
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+ABSL_FLAG(std::optional<int64_t>, std_optional_int64, std::nullopt, "help");
+#endif
+
+namespace {
+
+#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
+TEST_F(FlagTest, TestOptionalBool) {
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt);
+
+  absl::SetFlag(&FLAGS_optional_bool, false);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), false);
+
+  absl::SetFlag(&FLAGS_optional_bool, true);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), true);
+
+  absl::SetFlag(&FLAGS_optional_bool, absl::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+#endif
+
+TEST_F(FlagTest, TestOptionalInt) {
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt);
+
+  absl::SetFlag(&FLAGS_optional_int, 0);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 0);
+
+  absl::SetFlag(&FLAGS_optional_int, 10);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 10);
+
+  absl::SetFlag(&FLAGS_optional_int, absl::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalDouble) {
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value());
+  EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 9.3);
+
+  absl::SetFlag(&FLAGS_optional_double, 0.0);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), 0.0);
+
+  absl::SetFlag(&FLAGS_optional_double, 1.234);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value());
+  EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 1.234);
+
+  absl::SetFlag(&FLAGS_optional_double, absl::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_double).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalString) {
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt);
+
+  // Setting optional string to "" leads to undefined behavior.
+
+  absl::SetFlag(&FLAGS_optional_string, " ");
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), " ");
+
+  absl::SetFlag(&FLAGS_optional_string, "QWERTY");
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), "QWERTY");
+
+  absl::SetFlag(&FLAGS_optional_string, absl::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalDuration) {
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt);
+
+  absl::SetFlag(&FLAGS_optional_duration, absl::ZeroDuration());
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Seconds(0));
+
+  absl::SetFlag(&FLAGS_optional_duration, absl::Hours(3));
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Hours(3));
+
+  absl::SetFlag(&FLAGS_optional_duration, absl::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalOptional) {
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt);
+
+  absl::optional<int> nullint{absl::nullopt};
+
+  absl::SetFlag(&FLAGS_optional_optional_int, nullint);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+  EXPECT_NE(absl::GetFlag(FLAGS_optional_optional_int), nullint);
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int),
+            absl::optional<absl::optional<int>>{nullint});
+
+  absl::SetFlag(&FLAGS_optional_optional_int, 0);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0);
+
+  absl::SetFlag(&FLAGS_optional_optional_int, absl::optional<int>{0});
+  EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0);
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::optional<int>{0});
+
+  absl::SetFlag(&FLAGS_optional_optional_int, absl::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+
+TEST_F(FlagTest, TestStdOptional) {
+  EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt);
+
+  absl::SetFlag(&FLAGS_std_optional_int64, 0);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0);
+
+  absl::SetFlag(&FLAGS_std_optional_int64, 0xFFFFFFFFFF16);
+  EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0xFFFFFFFFFF16);
+
+  absl::SetFlag(&FLAGS_std_optional_int64, std::nullopt);
+  EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+  EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+#endif
+
+}  // namespace
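
The new test coverage above exercises absl::int128, absl::uint128, and
absl::optional flags end to end. As a small usage sketch with a hypothetical
flag name (and assuming, as the tests do, that 128-bit values round-trip through
their decimal text form):

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/numeric/int128.h"

ABSL_FLAG(absl::uint128, max_key, absl::MakeUint128(1, 0),
          "Largest key to scan (hypothetical example flag)");

int main(int argc, char* argv[]) {
  // e.g. --max_key=18446744073709551617 on the command line.
  absl::ParseCommandLine(argc, argv);
  absl::uint128 max_key = absl::GetFlag(FLAGS_max_key);
  return max_key > 0 ? 0 : 1;
}
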
diff --git a/abseil-cpp/absl/flags/internal/commandlineflag.cc b/abseil-cpp/absl/flags/internal/commandlineflag.cc
index 4482955..3c114d1 100644
--- a/abseil-cpp/absl/flags/internal/commandlineflag.cc
+++ b/abseil-cpp/absl/flags/internal/commandlineflag.cc
@@ -19,7 +19,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace flags_internal {
 
-FlagStateInterface::~FlagStateInterface() {}
+FlagStateInterface::~FlagStateInterface() = default;
 
 }  // namespace flags_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/flags/internal/commandlineflag.h b/abseil-cpp/absl/flags/internal/commandlineflag.h
index cb46fe2..ebfe81b 100644
--- a/abseil-cpp/absl/flags/internal/commandlineflag.h
+++ b/abseil-cpp/absl/flags/internal/commandlineflag.h
@@ -24,7 +24,7 @@
 namespace flags_internal {
 
 // An alias for flag fast type id. This value identifies the flag value type
-// simialarly to typeid(T), without relying on RTTI being available. In most
+// similarly to typeid(T), without relying on RTTI being available. In most
 // cases this id is enough to uniquely identify the flag's value type. In a few
 // cases we'll have to resort to using actual RTTI implementation if it is
 // available.
diff --git a/abseil-cpp/absl/flags/internal/flag.cc b/abseil-cpp/absl/flags/internal/flag.cc
index 1502e7f..65d0e58 100644
--- a/abseil-cpp/absl/flags/internal/flag.cc
+++ b/abseil-cpp/absl/flags/internal/flag.cc
@@ -30,6 +30,7 @@
 #include "absl/base/call_once.h"
 #include "absl/base/casts.h"
 #include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
 #include "absl/base/optimization.h"
 #include "absl/flags/config.h"
 #include "absl/flags/internal/commandlineflag.h"
@@ -96,7 +97,8 @@
         counter_(counter) {}
 
   ~FlagState() override {
-    if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer)
+    if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer &&
+        flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked)
       return;
     flags_internal::Delete(flag_impl_.op_, value_.heap_allocated);
   }
@@ -118,11 +120,9 @@
   union SavedValue {
     explicit SavedValue(void* v) : heap_allocated(v) {}
     explicit SavedValue(int64_t v) : one_word(v) {}
-    explicit SavedValue(flags_internal::AlignedTwoWords v) : two_words(v) {}
 
     void* heap_allocated;
     int64_t one_word;
-    flags_internal::AlignedTwoWords two_words;
   } value_;
   bool modified_;
   bool on_command_line_;
@@ -146,12 +146,7 @@
   auto def_kind = static_cast<FlagDefaultKind>(def_kind_);
 
   switch (ValueStorageKind()) {
-    case FlagValueStorageKind::kAlignedBuffer:
-      // For this storage kind the default_value_ always points to gen_func
-      // during initialization.
-      assert(def_kind == FlagDefaultKind::kGenFunc);
-      (*default_value_.gen_func)(AlignedBufferValue());
-      break;
+    case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic: {
       alignas(int64_t) std::array<char, sizeof(int64_t)> buf{};
       if (def_kind == FlagDefaultKind::kGenFunc) {
@@ -160,21 +155,33 @@
         assert(def_kind != FlagDefaultKind::kDynamicValue);
         std::memcpy(buf.data(), &default_value_, Sizeof(op_));
       }
+      if (ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit) {
+        // We presume here the memory layout of FlagValueAndInitBit struct.
+        uint8_t initialized = 1;
+        std::memcpy(buf.data() + Sizeof(op_), &initialized,
+                    sizeof(initialized));
+      }
+      // Type can contain valid uninitialized bits, e.g. padding.
+      ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf.data(), buf.size());
       OneWordValue().store(absl::bit_cast<int64_t>(buf),
                            std::memory_order_release);
       break;
     }
-    case FlagValueStorageKind::kTwoWordsAtomic: {
+    case FlagValueStorageKind::kSequenceLocked: {
       // For this storage kind the default_value_ always points to gen_func
       // during initialization.
       assert(def_kind == FlagDefaultKind::kGenFunc);
-      alignas(AlignedTwoWords) std::array<char, sizeof(AlignedTwoWords)> buf{};
-      (*default_value_.gen_func)(buf.data());
-      auto atomic_value = absl::bit_cast<AlignedTwoWords>(buf);
-      TwoWordsValue().store(atomic_value, std::memory_order_release);
+      (*default_value_.gen_func)(AtomicBufferValue());
       break;
     }
+    case FlagValueStorageKind::kAlignedBuffer:
+      // For this storage kind the default_value_ always points to gen_func
+      // during initialization.
+      assert(def_kind == FlagDefaultKind::kGenFunc);
+      (*default_value_.gen_func)(AlignedBufferValue());
+      break;
   }
+  seq_lock_.MarkInitialized();
 }
 
 absl::Mutex* FlagImpl::DataGuard() const {
@@ -190,7 +197,7 @@
   FlagFastTypeId lhs_type_id = flags_internal::FastTypeId(op_);
 
   // `rhs_type_id` is the fast type id corresponding to the declaration
-  // visibile at the call site. `lhs_type_id` is the fast type id
+  // visible at the call site. `lhs_type_id` is the fast type id
   // corresponding to the type specified in flag definition. They must match
   //  for this operation to be well-defined.
   if (ABSL_PREDICT_TRUE(lhs_type_id == rhs_type_id)) return;
@@ -201,7 +208,7 @@
 
   if (lhs_runtime_type_id == rhs_runtime_type_id) return;
 
-#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI)
+#ifdef ABSL_INTERNAL_HAS_RTTI
   if (*lhs_runtime_type_id == *rhs_runtime_type_id) return;
 #endif
 
@@ -229,25 +236,25 @@
 
 void FlagImpl::StoreValue(const void* src) {
   switch (ValueStorageKind()) {
-    case FlagValueStorageKind::kAlignedBuffer:
-      Copy(op_, src, AlignedBufferValue());
-      break;
+    case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic: {
-      int64_t one_word_val = 0;
+      // Load the current value to avoid setting 'init' bit manually.
+      int64_t one_word_val = OneWordValue().load(std::memory_order_acquire);
       std::memcpy(&one_word_val, src, Sizeof(op_));
       OneWordValue().store(one_word_val, std::memory_order_release);
+      seq_lock_.IncrementModificationCount();
       break;
     }
-    case FlagValueStorageKind::kTwoWordsAtomic: {
-      AlignedTwoWords two_words_val{0, 0};
-      std::memcpy(&two_words_val, src, Sizeof(op_));
-      TwoWordsValue().store(two_words_val, std::memory_order_release);
+    case FlagValueStorageKind::kSequenceLocked: {
+      seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_));
       break;
     }
+    case FlagValueStorageKind::kAlignedBuffer:
+      Copy(op_, src, AlignedBufferValue());
+      seq_lock_.IncrementModificationCount();
+      break;
   }
-
   modified_ = true;
-  ++counter_;
   InvokeCallback();
 }
 
@@ -266,6 +273,10 @@
   return flags_internal::FastTypeId(op_);
 }
 
+int64_t FlagImpl::ModificationCount() const {
+  return seq_lock_.ModificationCount();
+}
+
 bool FlagImpl::IsSpecifiedOnCommandLine() const {
   absl::MutexLock l(DataGuard());
   return on_command_line_;
@@ -281,21 +292,22 @@
 std::string FlagImpl::CurrentValue() const {
   auto* guard = DataGuard();  // Make sure flag initialized
   switch (ValueStorageKind()) {
-    case FlagValueStorageKind::kAlignedBuffer: {
-      absl::MutexLock l(guard);
-      return flags_internal::Unparse(op_, AlignedBufferValue());
-    }
+    case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic: {
       const auto one_word_val =
           absl::bit_cast<std::array<char, sizeof(int64_t)>>(
               OneWordValue().load(std::memory_order_acquire));
       return flags_internal::Unparse(op_, one_word_val.data());
     }
-    case FlagValueStorageKind::kTwoWordsAtomic: {
-      const auto two_words_val =
-          absl::bit_cast<std::array<char, sizeof(AlignedTwoWords)>>(
-              TwoWordsValue().load(std::memory_order_acquire));
-      return flags_internal::Unparse(op_, two_words_val.data());
+    case FlagValueStorageKind::kSequenceLocked: {
+      std::unique_ptr<void, DynValueDeleter> cloned(flags_internal::Alloc(op_),
+                                                    DynValueDeleter{op_});
+      ReadSequenceLockedData(cloned.get());
+      return flags_internal::Unparse(op_, cloned.get());
+    }
+    case FlagValueStorageKind::kAlignedBuffer: {
+      absl::MutexLock l(guard);
+      return flags_internal::Unparse(op_, AlignedBufferValue());
     }
   }
 
@@ -342,20 +354,26 @@
   bool modified = modified_;
   bool on_command_line = on_command_line_;
   switch (ValueStorageKind()) {
-    case FlagValueStorageKind::kAlignedBuffer: {
-      return absl::make_unique<FlagState>(
-          *this, flags_internal::Clone(op_, AlignedBufferValue()), modified,
-          on_command_line, counter_);
-    }
+    case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic: {
       return absl::make_unique<FlagState>(
           *this, OneWordValue().load(std::memory_order_acquire), modified,
-          on_command_line, counter_);
+          on_command_line, ModificationCount());
     }
-    case FlagValueStorageKind::kTwoWordsAtomic: {
+    case FlagValueStorageKind::kSequenceLocked: {
+      void* cloned = flags_internal::Alloc(op_);
+      // Read is guaranteed to be successful because we hold the lock.
+      bool success =
+          seq_lock_.TryRead(cloned, AtomicBufferValue(), Sizeof(op_));
+      assert(success);
+      static_cast<void>(success);
+      return absl::make_unique<FlagState>(*this, cloned, modified,
+                                          on_command_line, ModificationCount());
+    }
+    case FlagValueStorageKind::kAlignedBuffer: {
       return absl::make_unique<FlagState>(
-          *this, TwoWordsValue().load(std::memory_order_acquire), modified,
-          on_command_line, counter_);
+          *this, flags_internal::Clone(op_, AlignedBufferValue()), modified,
+          on_command_line, ModificationCount());
     }
   }
   return nullptr;
@@ -363,20 +381,18 @@
 
 bool FlagImpl::RestoreState(const FlagState& flag_state) {
   absl::MutexLock l(DataGuard());
-
-  if (flag_state.counter_ == counter_) {
+  if (flag_state.counter_ == ModificationCount()) {
     return false;
   }
 
   switch (ValueStorageKind()) {
-    case FlagValueStorageKind::kAlignedBuffer:
-      StoreValue(flag_state.value_.heap_allocated);
-      break;
+    case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic:
       StoreValue(&flag_state.value_.one_word);
       break;
-    case FlagValueStorageKind::kTwoWordsAtomic:
-      StoreValue(&flag_state.value_.two_words);
+    case FlagValueStorageKind::kSequenceLocked:
+    case FlagValueStorageKind::kAlignedBuffer:
+      StoreValue(flag_state.value_.heap_allocated);
       break;
   }
 
@@ -390,7 +406,7 @@
 StorageT* FlagImpl::OffsetValue() const {
   char* p = reinterpret_cast<char*>(const_cast<FlagImpl*>(this));
   // The offset is deduced via Flag value type specific op_.
-  size_t offset = flags_internal::ValueOffset(op_);
+  ptrdiff_t offset = flags_internal::ValueOffset(op_);
 
   return reinterpret_cast<StorageT*>(p + offset);
 }
@@ -400,14 +416,15 @@
   return OffsetValue<void>();
 }
 
-std::atomic<int64_t>& FlagImpl::OneWordValue() const {
-  assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic);
-  return OffsetValue<FlagOneWordValue>()->value;
+std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const {
+  assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked);
+  return OffsetValue<std::atomic<uint64_t>>();
 }
 
-std::atomic<AlignedTwoWords>& FlagImpl::TwoWordsValue() const {
-  assert(ValueStorageKind() == FlagValueStorageKind::kTwoWordsAtomic);
-  return OffsetValue<FlagTwoWordsValue>()->value;
+std::atomic<int64_t>& FlagImpl::OneWordValue() const {
+  assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic ||
+         ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit);
+  return OffsetValue<FlagOneWordValue>()->value;
 }
 
 // Attempts to parse supplied `value` string using parsing routine in the `flag`
@@ -432,26 +449,56 @@
 void FlagImpl::Read(void* dst) const {
   auto* guard = DataGuard();  // Make sure flag initialized
   switch (ValueStorageKind()) {
-    case FlagValueStorageKind::kAlignedBuffer: {
-      absl::MutexLock l(guard);
-      flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst);
-      break;
-    }
+    case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic: {
       const int64_t one_word_val =
           OneWordValue().load(std::memory_order_acquire);
       std::memcpy(dst, &one_word_val, Sizeof(op_));
       break;
     }
-    case FlagValueStorageKind::kTwoWordsAtomic: {
-      const AlignedTwoWords two_words_val =
-          TwoWordsValue().load(std::memory_order_acquire);
-      std::memcpy(dst, &two_words_val, Sizeof(op_));
+    case FlagValueStorageKind::kSequenceLocked: {
+      ReadSequenceLockedData(dst);
+      break;
+    }
+    case FlagValueStorageKind::kAlignedBuffer: {
+      absl::MutexLock l(guard);
+      flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst);
       break;
     }
   }
 }
 
+int64_t FlagImpl::ReadOneWord() const {
+  assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic ||
+         ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit);
+  auto* guard = DataGuard();  // Make sure flag initialized
+  (void)guard;
+  return OneWordValue().load(std::memory_order_acquire);
+}
+
+bool FlagImpl::ReadOneBool() const {
+  assert(ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit);
+  auto* guard = DataGuard();  // Make sure flag initialized
+  (void)guard;
+  return absl::bit_cast<FlagValueAndInitBit<bool>>(
+             OneWordValue().load(std::memory_order_acquire))
+      .value;
+}
+
+void FlagImpl::ReadSequenceLockedData(void* dst) const {
+  size_t size = Sizeof(op_);
+  // Attempt to read using the sequence lock.
+  if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) {
+    return;
+  }
+  // We failed due to contention. Acquire the lock to prevent contention
+  // and try again.
+  absl::ReaderMutexLock l(DataGuard());
+  bool success = seq_lock_.TryRead(dst, AtomicBufferValue(), size);
+  assert(success);
+  static_cast<void>(success);
+}
+
 void FlagImpl::Write(const void* src) {
   absl::MutexLock l(DataGuard());
 
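
For readers unfamiliar with the scheme that ReadSequenceLockedData() relies on,
the sketch below shows the general sequence-lock read protocol. It is an
illustrative, generic version, not the actual
absl::flags_internal::SequenceLock implementation: a writer makes the sequence
counter odd while it rewrites the data and even again when done, so a reader
accepts its copy only if the counter was even and unchanged across the copy.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Returns true iff `dst` received a consistent snapshot of the `size` bytes
// stored in the atomic words at `src`.
bool SeqLockTryRead(void* dst, const std::atomic<uint64_t>* src, size_t size,
                    const std::atomic<uint64_t>& seq) {
  uint64_t seq_before = seq.load(std::memory_order_acquire);
  if (seq_before & 1) return false;  // A writer is mid-update.
  char* out = static_cast<char*>(dst);
  for (size_t offset = 0; offset < size; offset += sizeof(uint64_t)) {
    uint64_t word =
        src[offset / sizeof(uint64_t)].load(std::memory_order_relaxed);
    size_t n = size - offset < sizeof(word) ? size - offset : sizeof(word);
    std::memcpy(out + offset, &word, n);
  }
  // If the counter changed, a writer raced with us and the copy may be torn;
  // the caller retries, under a mutex on contention, exactly as
  // FlagImpl::ReadSequenceLockedData() does above.
  std::atomic_thread_fence(std::memory_order_acquire);
  return seq.load(std::memory_order_relaxed) == seq_before;
}
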
diff --git a/abseil-cpp/absl/flags/internal/flag.h b/abseil-cpp/absl/flags/internal/flag.h
index 370d8a0..b41f9a6 100644
--- a/abseil-cpp/absl/flags/internal/flag.h
+++ b/abseil-cpp/absl/flags/internal/flag.h
@@ -29,6 +29,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/call_once.h"
+#include "absl/base/casts.h"
 #include "absl/base/config.h"
 #include "absl/base/optimization.h"
 #include "absl/base/thread_annotations.h"
@@ -36,6 +37,7 @@
 #include "absl/flags/config.h"
 #include "absl/flags/internal/commandlineflag.h"
 #include "absl/flags/internal/registry.h"
+#include "absl/flags/internal/sequence_lock.h"
 #include "absl/flags/marshalling.h"
 #include "absl/meta/type_traits.h"
 #include "absl/strings/string_view.h"
@@ -119,7 +121,7 @@
   flags_internal::CopyConstruct(op, obj, res);
   return res;
 }
-// Returns true if parsing of input text is successfull.
+// Returns true if parsing of input text is successful.
 inline bool Parse(FlagOpFn op, absl::string_view text, void* dst,
                   std::string* error) {
   return op(FlagOp::kParse, &text, dst, error) != nullptr;
@@ -137,12 +139,12 @@
   return static_cast<size_t>(reinterpret_cast<intptr_t>(
       op(FlagOp::kSizeof, nullptr, nullptr, nullptr)));
 }
-// Returns fast type id coresponding to the value type.
+// Returns fast type id corresponding to the value type.
 inline FlagFastTypeId FastTypeId(FlagOpFn op) {
   return reinterpret_cast<FlagFastTypeId>(
       op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr));
 }
-// Returns fast type id coresponding to the value type.
+// Returns fast type id corresponding to the value type.
 inline const std::type_info* RuntimeTypeId(FlagOpFn op) {
   return reinterpret_cast<const std::type_info*>(
       op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr));
@@ -161,7 +163,7 @@
 // Returns an address of RTTI's typeid(T).
 template <typename T>
 inline const std::type_info* GenRuntimeTypeId() {
-#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI)
+#ifdef ABSL_INTERNAL_HAS_RTTI
   return &typeid(T);
 #else
   return nullptr;
@@ -221,12 +223,12 @@
 // first overload if possible. If help message is evaluatable on constexpr
 // context We'll be able to make FixedCharArray out of it and we'll choose first
 // overload. In this case the help message expression is immediately evaluated
-// and is used to construct the absl::Flag. No additionl code is generated by
+// and is used to construct the absl::Flag. No additional code is generated by
 // ABSL_FLAG Otherwise SFINAE kicks in and first overload is dropped from the
 // consideration, in which case the second overload will be used. The second
 // overload does not attempt to evaluate the help message expression
-// immediately and instead delays the evaluation by returing the function
-// pointer (&T::NonConst) genering the help message when necessary. This is
+// immediately and instead delays the evaluation by returning the function
+// pointer (&T::NonConst) generating the help message when necessary. This is
 // evaluatable in constexpr context, but the cost is an extra function being
 // generated in the ABSL_FLAG code.
 template <typename Gen, size_t N>
@@ -288,7 +290,7 @@
 
 template <typename ValueT, typename GenT,
           typename std::enable_if<std::is_integral<ValueT>::value, int>::type =
-              (GenT{}, 0)>
+              ((void)GenT{}, 0)>
 constexpr FlagDefaultArg DefaultArg(int) {
   return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord};
 }
@@ -301,79 +303,55 @@
 ///////////////////////////////////////////////////////////////////////////////
 // Flag current value auxiliary structs.
 
-constexpr int64_t UninitializedFlagValue() { return 0xababababababababll; }
+constexpr int64_t UninitializedFlagValue() {
+  return static_cast<int64_t>(0xababababababababll);
+}
 
 template <typename T>
-using FlagUseOneWordStorage = std::integral_constant<
-    bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
-              (sizeof(T) <= 8)>;
-
-#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD)
-// Clang does not always produce cmpxchg16b instruction when alignment of a 16
-// bytes type is not 16.
-struct alignas(16) AlignedTwoWords {
-  int64_t first;
-  int64_t second;
-
-  bool IsInitialized() const {
-    return first != flags_internal::UninitializedFlagValue();
-  }
-};
+using FlagUseValueAndInitBitStorage =
+    std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+                                     std::is_default_constructible<T>::value &&
+                                     (sizeof(T) < 8)>;
 
 template <typename T>
-using FlagUseTwoWordsStorage = std::integral_constant<
-    bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
-              (sizeof(T) > 8) && (sizeof(T) <= 16)>;
-#else
-// This is actually unused and only here to avoid ifdefs in other palces.
-struct AlignedTwoWords {
-  constexpr AlignedTwoWords() noexcept : dummy() {}
-  constexpr AlignedTwoWords(int64_t, int64_t) noexcept : dummy() {}
-  char dummy;
+using FlagUseOneWordStorage =
+    std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+                                     (sizeof(T) <= 8)>;
 
-  bool IsInitialized() const {
-    std::abort();
-    return true;
-  }
-};
-
-// This trait should be type dependent, otherwise SFINAE below will fail
-template <typename T>
-using FlagUseTwoWordsStorage =
-    std::integral_constant<bool, sizeof(T) != sizeof(T)>;
-#endif
-
-template <typename T>
-using FlagUseBufferStorage =
-    std::integral_constant<bool, !FlagUseOneWordStorage<T>::value &&
-                                     !FlagUseTwoWordsStorage<T>::value>;
+template <class T>
+using FlagUseSequenceLockStorage =
+    std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+                                     (sizeof(T) > 8)>;
 
 enum class FlagValueStorageKind : uint8_t {
-  kAlignedBuffer = 0,
+  kValueAndInitBit = 0,
   kOneWordAtomic = 1,
-  kTwoWordsAtomic = 2
+  kSequenceLocked = 2,
+  kAlignedBuffer = 3,
 };
 
 template <typename T>
 static constexpr FlagValueStorageKind StorageKind() {
-  return FlagUseBufferStorage<T>::value
-             ? FlagValueStorageKind::kAlignedBuffer
-             : FlagUseOneWordStorage<T>::value
-                   ? FlagValueStorageKind::kOneWordAtomic
-                   : FlagValueStorageKind::kTwoWordsAtomic;
+  return FlagUseValueAndInitBitStorage<T>::value
+             ? FlagValueStorageKind::kValueAndInitBit
+         : FlagUseOneWordStorage<T>::value
+             ? FlagValueStorageKind::kOneWordAtomic
+         : FlagUseSequenceLockStorage<T>::value
+             ? FlagValueStorageKind::kSequenceLocked
+             : FlagValueStorageKind::kAlignedBuffer;
 }
 
 struct FlagOneWordValue {
-  constexpr FlagOneWordValue() : value(UninitializedFlagValue()) {}
-
+  constexpr explicit FlagOneWordValue(int64_t v) : value(v) {}
   std::atomic<int64_t> value;
 };
 
-struct FlagTwoWordsValue {
-  constexpr FlagTwoWordsValue()
-      : value(AlignedTwoWords{UninitializedFlagValue(), 0}) {}
-
-  std::atomic<AlignedTwoWords> value;
+template <typename T>
+struct alignas(8) FlagValueAndInitBit {
+  T value;
+  // Use an int instead of a bool to guarantee that a non-zero value has
+  // a bit set.
+  uint8_t init;
 };
 
 template <typename T,
@@ -381,15 +359,22 @@
 struct FlagValue;
 
 template <typename T>
-struct FlagValue<T, FlagValueStorageKind::kAlignedBuffer> {
-  bool Get(T&) const { return false; }
-
-  alignas(T) char value[sizeof(T)];
+struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
+  constexpr FlagValue() : FlagOneWordValue(0) {}
+  bool Get(const SequenceLock&, T& dst) const {
+    int64_t storage = value.load(std::memory_order_acquire);
+    if (ABSL_PREDICT_FALSE(storage == 0)) {
+      return false;
+    }
+    dst = absl::bit_cast<FlagValueAndInitBit<T>>(storage).value;
+    return true;
+  }
 };
 
 template <typename T>
 struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
-  bool Get(T& dst) const {
+  constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {}
+  bool Get(const SequenceLock&, T& dst) const {
     int64_t one_word_val = value.load(std::memory_order_acquire);
     if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) {
       return false;
@@ -400,15 +385,23 @@
 };
 
 template <typename T>
-struct FlagValue<T, FlagValueStorageKind::kTwoWordsAtomic> : FlagTwoWordsValue {
-  bool Get(T& dst) const {
-    AlignedTwoWords two_words_val = value.load(std::memory_order_acquire);
-    if (ABSL_PREDICT_FALSE(!two_words_val.IsInitialized())) {
-      return false;
-    }
-    std::memcpy(&dst, static_cast<const void*>(&two_words_val), sizeof(T));
-    return true;
+struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
+  bool Get(const SequenceLock& lock, T& dst) const {
+    return lock.TryRead(&dst, value_words, sizeof(T));
   }
+
+  static constexpr int kNumWords =
+      flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t);
+
+  alignas(T) alignas(
+      std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords];
+};
+
+template <typename T>
+struct FlagValue<T, FlagValueStorageKind::kAlignedBuffer> {
+  bool Get(const SequenceLock&, T&) const { return false; }
+
+  alignas(T) char value[sizeof(T)];
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -451,13 +444,32 @@
         def_kind_(static_cast<uint8_t>(default_arg.kind)),
         modified_(false),
         on_command_line_(false),
-        counter_(0),
         callback_(nullptr),
         default_value_(default_arg.source),
         data_guard_{} {}
 
   // Constant access methods
+  int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard());
+  bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard());
   void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard());
+  void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) {
+    *value = ReadOneBool();
+  }
+  template <typename T,
+            absl::enable_if_t<flags_internal::StorageKind<T>() ==
+                                  FlagValueStorageKind::kOneWordAtomic,
+                              int> = 0>
+  void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) {
+    int64_t v = ReadOneWord();
+    std::memcpy(value, static_cast<const void*>(&v), sizeof(T));
+  }
+  template <typename T,
+            typename std::enable_if<flags_internal::StorageKind<T>() ==
+                                        FlagValueStorageKind::kValueAndInitBit,
+                                    int>::type = 0>
+  void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) {
+    *value = absl::bit_cast<FlagValueAndInitBit<T>>(ReadOneWord()).value;
+  }
 
   // Mutating access methods
   void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard());
@@ -498,15 +510,17 @@
   // flag.cc, we can define it in that file as well.
   template <typename StorageT>
   StorageT* OffsetValue() const;
-  // This is an accessor for a value stored in an aligned buffer storage.
+  // This is an accessor for a value stored in an aligned buffer storage
+  // used for non-trivially-copyable data types.
   // Returns a mutable pointer to the start of a buffer.
   void* AlignedBufferValue() const;
+
+  // The same as above, but used for sequence-lock-protected storage.
+  std::atomic<uint64_t>* AtomicBufferValue() const;
+
   // This is an accessor for a value stored as one word atomic. Returns a
   // mutable reference to an atomic value.
   std::atomic<int64_t>& OneWordValue() const;
-  // This is an accessor for a value stored as two words atomic. Returns a
-  // mutable reference to an atomic value.
-  std::atomic<AlignedTwoWords>& TwoWordsValue() const;
 
   // Attempts to parse supplied `value` string. If parsing is successful,
   // returns new value. Otherwise returns nullptr.
@@ -516,6 +530,12 @@
   // Stores the flag value based on the pointer to the source.
   void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
 
+  // Copy the flag data, protected by `seq_lock_` into `dst`.
+  //
+  // REQUIRES: ValueStorageKind() == kSequenceLocked.
+  void ReadSequenceLockedData(void* dst) const
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+
   FlagHelpKind HelpSourceKind() const {
     return static_cast<FlagHelpKind>(help_source_kind_);
   }
@@ -541,6 +561,8 @@
   void CheckDefaultValueParsingRoundtrip() const override
       ABSL_LOCKS_EXCLUDED(*DataGuard());
 
+  int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+
   // Interfaces to save and restore flags to/from persistent state.
   // Returns current flag state or nullptr if flag does not support
   // saving and restoring a state.
@@ -587,8 +609,9 @@
   // Unique tag for absl::call_once call to initialize this flag.
   absl::once_flag init_control_;
 
-  // Mutation counter
-  int64_t counter_ ABSL_GUARDED_BY(*DataGuard());
+  // Sequence lock / mutation counter.
+  flags_internal::SequenceLock seq_lock_;
+
   // Optional flag's callback and absl::Mutex to guard the invocations.
   FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard());
   // Either a pointer to the function generating the default value based on the
@@ -649,7 +672,9 @@
     impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
 #endif
 
-    if (!value_.Get(u.value)) impl_.Read(&u.value);
+    if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) {
+      impl_.Read(&u.value);
+    }
     return std::move(u.value);
   }
   void Set(const T& v) {
@@ -733,8 +758,8 @@
     case FlagOp::kValueOffset: {
       // Round sizeof(FlagImp) to a multiple of alignof(FlagValue<T>) to get the
       // offset of the data.
-      ptrdiff_t round_to = alignof(FlagValue<T>);
-      ptrdiff_t offset =
+      size_t round_to = alignof(FlagValue<T>);
+      size_t offset =
           (sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
       return reinterpret_cast<void*>(offset);
     }
@@ -750,8 +775,9 @@
 template <typename T, bool do_register>
 class FlagRegistrar {
  public:
-  explicit FlagRegistrar(Flag<T>& flag) : flag_(flag) {
-    if (do_register) flags_internal::RegisterCommandLineFlag(flag_.impl_);
+  explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) {
+    if (do_register)
+      flags_internal::RegisterCommandLineFlag(flag_.impl_, filename);
   }
 
   FlagRegistrar OnUpdate(FlagCallbackFunc cb) && {
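For reference, the four-way storage selection introduced above can be restated as a small standalone snippet (illustrative only; `Kind` and `KindFor` are hypothetical names mirroring `FlagValueStorageKind` and `StorageKind<T>()`):

#include <cstdint>
#include <string>
#include <type_traits>

enum class Kind { kValueAndInitBit, kOneWordAtomic, kSequenceLocked, kAlignedBuffer };

template <typename T>
constexpr Kind KindFor() {
  // Same conditions, in the same order, as the StorageKind<T>() selection above.
  return std::is_trivially_copyable<T>::value &&
                 std::is_default_constructible<T>::value && sizeof(T) < 8
             ? Kind::kValueAndInitBit
         : std::is_trivially_copyable<T>::value && sizeof(T) <= 8
             ? Kind::kOneWordAtomic
         : std::is_trivially_copyable<T>::value && sizeof(T) > 8
             ? Kind::kSequenceLocked
             : Kind::kAlignedBuffer;
}

struct BigPod { char bytes[24]; };  // trivially copyable, larger than one word

static_assert(KindFor<bool>() == Kind::kValueAndInitBit, "");
static_assert(KindFor<std::int32_t>() == Kind::kValueAndInitBit, "");
static_assert(KindFor<std::int64_t>() == Kind::kOneWordAtomic, "");
static_assert(KindFor<double>() == Kind::kOneWordAtomic, "");
static_assert(KindFor<BigPod>() == Kind::kSequenceLocked, "");
static_assert(KindFor<std::string>() == Kind::kAlignedBuffer, "");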
diff --git a/abseil-cpp/absl/flags/internal/flag_msvc.inc b/abseil-cpp/absl/flags/internal/flag_msvc.inc
new file mode 100644
index 0000000..614d09f
--- /dev/null
+++ b/abseil-cpp/absl/flags/internal/flag_msvc.inc
@@ -0,0 +1,116 @@
+//
+//  Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Do not include this file directly.
+// Include absl/flags/flag.h instead.
+
+// MSVC debug builds do not implement initialization with constexpr constructors
+// correctly. To work around this we add a level of indirection, so that the
+// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias
+// to that class) and dynamically allocates an instance when necessary. We also
+// forward all calls to internal::Flag methods via trampoline methods. In this
+// setup the `absl::Flag` class does not have constructor and virtual methods,
+// all the data members are public and thus MSVC is able to initialize it at
+// link time. To deal with multiple threads accessing the flag for the first
+// time concurrently we use an atomic boolean indicating whether the flag
+// object is initialized. We also employ the double-checked locking pattern
+// where the
+// second level of protection is a global Mutex, so if two threads attempt to
+// construct the flag concurrently only one wins.
+//
+// This solution is based on a recommendation here:
+// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454
+
+namespace flags_internal {
+absl::Mutex* GetGlobalConstructionGuard();
+}  // namespace flags_internal
+
+// Public methods of `absl::Flag<T>` are NOT part of the Abseil Flags API.
+// See https://abseil.io/docs/cpp/guides/flags
+template <typename T>
+class Flag {
+ public:
+  // No constructor and destructor to ensure this is an aggregate type.
+  // Visual Studio 2015 still requires a constructor for the class to be
+  // constexpr initializable.
+#if _MSC_VER <= 1900
+  constexpr Flag(const char* name, const char* filename,
+                 const flags_internal::HelpGenFunc help_gen,
+                 const flags_internal::FlagDfltGenFunc default_value_gen)
+      : name_(name),
+        filename_(filename),
+        help_gen_(help_gen),
+        default_value_gen_(default_value_gen),
+        inited_(false),
+        impl_(nullptr) {}
+#endif
+
+  flags_internal::Flag<T>& GetImpl() const {
+    if (!inited_.load(std::memory_order_acquire)) {
+      absl::MutexLock l(flags_internal::GetGlobalConstructionGuard());
+
+      if (inited_.load(std::memory_order_acquire)) {
+        return *impl_;
+      }
+
+      impl_ = new flags_internal::Flag<T>(
+          name_, filename_,
+          {flags_internal::FlagHelpMsg(help_gen_),
+           flags_internal::FlagHelpKind::kGenFunc},
+          {flags_internal::FlagDefaultSrc(default_value_gen_),
+           flags_internal::FlagDefaultKind::kGenFunc});
+      inited_.store(true, std::memory_order_release);
+    }
+
+    return *impl_;
+  }
+
+  // Public methods of `absl::Flag<T>` are NOT part of the Abseil Flags API.
+  // See https://abseil.io/docs/cpp/guides/flags
+  bool IsRetired() const { return GetImpl().IsRetired(); }
+  absl::string_view Name() const { return GetImpl().Name(); }
+  std::string Help() const { return GetImpl().Help(); }
+  bool IsModified() const { return GetImpl().IsModified(); }
+  bool IsSpecifiedOnCommandLine() const {
+    return GetImpl().IsSpecifiedOnCommandLine();
+  }
+  std::string Filename() const { return GetImpl().Filename(); }
+  std::string DefaultValue() const { return GetImpl().DefaultValue(); }
+  std::string CurrentValue() const { return GetImpl().CurrentValue(); }
+  template <typename U>
+  inline bool IsOfType() const {
+    return GetImpl().template IsOfType<U>();
+  }
+  T Get() const {
+    return flags_internal::FlagImplPeer::InvokeGet<T>(GetImpl());
+  }
+  void Set(const T& v) {
+    flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v);
+  }
+  void InvokeCallback() { GetImpl().InvokeCallback(); }
+
+  const CommandLineFlag& Reflect() const {
+    return flags_internal::FlagImplPeer::InvokeReflect(GetImpl());
+  }
+
+  // The data members are logically private, but they need to be public for
+  // this to be an aggregate type.
+  const char* name_;
+  const char* filename_;
+  const flags_internal::HelpGenFunc help_gen_;
+  const flags_internal::FlagDfltGenFunc default_value_gen_;
+
+  mutable std::atomic<bool> inited_;
+  mutable flags_internal::Flag<T>* impl_;
+};
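The GetImpl() trampoline above is a textbook double-checked locking pattern: an atomic flag for the lock-free fast path and a global mutex for the one-time construction. A minimal standalone sketch of the same idea (`Impl`, `GetGuard`, and `LazyHolder` are hypothetical names, not Abseil API):

#include <atomic>
#include <mutex>

struct Impl { int value = 42; };

std::mutex& GetGuard() {
  static std::mutex m;  // process-wide second level of protection
  return m;
}

struct LazyHolder {
  Impl& Get() const {
    if (!inited_.load(std::memory_order_acquire)) {    // first check, lock-free
      std::lock_guard<std::mutex> l(GetGuard());
      if (!inited_.load(std::memory_order_acquire)) {  // second check, under the lock
        impl_ = new Impl();
        inited_.store(true, std::memory_order_release);  // publish
      }
    }
    return *impl_;
  }

  // Members are public and mutable so a constant, statically initializable
  // object can still construct its implementation lazily, as in the MSVC
  // workaround above.
  mutable std::atomic<bool> inited_{false};
  mutable Impl* impl_{nullptr};
};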
diff --git a/abseil-cpp/absl/flags/internal/parse.h b/abseil-cpp/absl/flags/internal/parse.h
index de706c8..10c531b 100644
--- a/abseil-cpp/absl/flags/internal/parse.h
+++ b/abseil-cpp/absl/flags/internal/parse.h
@@ -16,11 +16,14 @@
 #ifndef ABSL_FLAGS_INTERNAL_PARSE_H_
 #define ABSL_FLAGS_INTERNAL_PARSE_H_
 
+#include <iostream>
+#include <ostream>
 #include <string>
 #include <vector>
 
 #include "absl/base/config.h"
 #include "absl/flags/declare.h"
+#include "absl/flags/internal/usage.h"
 #include "absl/strings/string_view.h"
 
 ABSL_DECLARE_FLAG(std::vector<std::string>, flagfile);
@@ -32,7 +35,6 @@
 ABSL_NAMESPACE_BEGIN
 namespace flags_internal {
 
-enum class ArgvListAction { kRemoveParsedArgs, kKeepParsedArgs };
 enum class UsageFlagsAction { kHandleUsage, kIgnoreUsage };
 enum class OnUndefinedFlag {
   kIgnoreUndefined,
@@ -40,10 +42,15 @@
   kAbortIfUndefined
 };
 
-std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
-                                        ArgvListAction arg_list_act,
-                                        UsageFlagsAction usage_flag_act,
-                                        OnUndefinedFlag on_undef_flag);
+// This is not a public interface. This interface exists to expose the ability
+// to change the help output stream in case of parsing errors. This is used by
+// internal unit tests to validate expected outputs.
+// When this was written, `EXPECT_EXIT` only supported matchers on stderr,
+// but not on stdout.
+std::vector<char*> ParseCommandLineImpl(
+    int argc, char* argv[], UsageFlagsAction usage_flag_action,
+    OnUndefinedFlag undef_flag_action,
+    std::ostream& error_help_output = std::cout);
 
 // --------------------------------------------------------------------
 // Inspect original command line
@@ -52,6 +59,10 @@
 // command line or specified in flag file present on the original command line.
 bool WasPresentOnCommandLine(absl::string_view flag_name);
 
+// Return existing flags similar to the parameter, in order to help in case of
+// misspellings.
+std::vector<std::string> GetMisspellingHints(absl::string_view flag);
+
 }  // namespace flags_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
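A sketch of what the new `error_help_output` parameter enables for internal tests (illustrative only; the surrounding fixture, `argc`, and `argv` are assumed, and the real tests combine this with EXPECT_EXIT as noted above):

#include <sstream>
#include <vector>

#include "absl/flags/internal/parse.h"

std::vector<char*> ParseForTest(int argc, char* argv[]) {
  std::stringstream captured_help;  // collects help text printed on parse errors
  std::vector<char*> positional = absl::flags_internal::ParseCommandLineImpl(
      argc, argv, absl::flags_internal::UsageFlagsAction::kHandleUsage,
      absl::flags_internal::OnUndefinedFlag::kAbortIfUndefined, captured_help);
  // captured_help.str() can now be matched against the expected help output.
  return positional;
}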
diff --git a/abseil-cpp/absl/flags/internal/registry.h b/abseil-cpp/absl/flags/internal/registry.h
index 1df2db7..4b68c85 100644
--- a/abseil-cpp/absl/flags/internal/registry.h
+++ b/abseil-cpp/absl/flags/internal/registry.h
@@ -30,16 +30,15 @@
 ABSL_NAMESPACE_BEGIN
 namespace flags_internal {
 
-// Executes specified visitor for each non-retired flag in the registry.
-// Requires the caller hold the registry lock.
-void ForEachFlagUnlocked(std::function<void(CommandLineFlag&)> visitor);
 // Executes specified visitor for each non-retired flag in the registry. While
 // callback are executed, the registry is locked and can't be changed.
 void ForEachFlag(std::function<void(CommandLineFlag&)> visitor);
 
 //-----------------------------------------------------------------------------
 
-bool RegisterCommandLineFlag(CommandLineFlag&);
+bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename);
+
+void FinalizeRegistry();
 
 //-----------------------------------------------------------------------------
 // Retired registrations:
diff --git a/abseil-cpp/absl/flags/internal/sequence_lock.h b/abseil-cpp/absl/flags/internal/sequence_lock.h
new file mode 100644
index 0000000..36318ab
--- /dev/null
+++ b/abseil-cpp/absl/flags/internal/sequence_lock.h
@@ -0,0 +1,187 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstring>
+
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace flags_internal {
+
+// Align 'x' up to the nearest 'align' bytes.
+inline constexpr size_t AlignUp(size_t x, size_t align) {
+  return align * ((x + align - 1) / align);
+}
+
+// A SequenceLock implements lock-free reads. A sequence counter is incremented
+// before and after each write, and readers access the counter before and after
+// accessing the protected data. If the counter is verified to not change during
+// the access, and the sequence counter value was even, then the reader knows
+// that the read was race-free and valid. Otherwise, the reader must fall back
+// to a Mutex-based code path.
+//
+// This particular SequenceLock starts in an "uninitialized" state in which
+// TryRead() returns false. It must be enabled by calling MarkInitialized().
+// This serves as a marker that the associated flag value has not yet been
+// initialized and a slow path needs to be taken.
+//
+// The memory reads and writes protected by this lock must use the provided
+// `TryRead()` and `Write()` functions. These functions behave similarly to
+// `memcpy()`, with one oddity: the protected data must be an array of
+// `std::atomic<uint64_t>`. This is to comply with the C++ standard, which
+// considers data races on non-atomic objects to be undefined behavior. See "Can
+// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
+// Boehm for more details.
+//
+// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+class SequenceLock {
+ public:
+  constexpr SequenceLock() : lock_(kUninitialized) {}
+
+  // Mark that this lock is ready for use.
+  void MarkInitialized() {
+    assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
+    lock_.store(0, std::memory_order_release);
+  }
+
+  // Copy "size" bytes of data from "src" to "dst", protected as a read-side
+  // critical section of the sequence lock.
+  //
+  // Unlike traditional sequence lock implementations which loop until getting a
+  // clean read, this implementation returns false in the case of concurrent
+  // calls to `Write`. In such a case, the caller should fall back to a
+  // locking-based slow path.
+  //
+  // Returns false if the sequence lock was not yet marked as initialized.
+  //
+  // NOTE: If this returns false, "dst" may be overwritten with undefined
+  // (potentially uninitialized) data.
+  bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
+    // Acquire barrier ensures that no loads done by f() are reordered
+    // above the first load of the sequence counter.
+    int64_t seq_before = lock_.load(std::memory_order_acquire);
+    if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
+    RelaxedCopyFromAtomic(dst, src, size);
+    // Another acquire fence ensures that the load of 'lock_' below is
+    // strictly ordered after the RelaxedCopyFromAtomic call above.
+    std::atomic_thread_fence(std::memory_order_acquire);
+    int64_t seq_after = lock_.load(std::memory_order_relaxed);
+    return ABSL_PREDICT_TRUE(seq_before == seq_after);
+  }
+
+  // Copy "size" bytes from "src" to "dst" as a write-side critical section
+  // of the sequence lock. Any concurrent readers will be forced to retry
+  // until they get a read that does not conflict with this write.
+  //
+  // This call must be externally synchronized against other calls to Write,
+  // but may proceed concurrently with reads.
+  void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
+    // We can use relaxed instructions to increment the counter since we
+    // are externally synchronized. The std::atomic_thread_fence below
+    // ensures that the counter updates don't get interleaved with the
+    // copy to the data.
+    int64_t orig_seq = lock_.load(std::memory_order_relaxed);
+    assert((orig_seq & 1) == 0);  // Must be initially unlocked.
+    lock_.store(orig_seq + 1, std::memory_order_relaxed);
+
+    // We put a release fence between update to lock_ and writes to shared data.
+    // Thus all stores to shared data are effectively release operations and
+    // update to lock_ above cannot be re-ordered past any of them. Note that
+    // this barrier is not for the counter increment above.  A release barrier
+    // for that increment would be before it, not after.
+    std::atomic_thread_fence(std::memory_order_release);
+    RelaxedCopyToAtomic(dst, src, size);
+    // "Release" semantics ensure that none of the writes done by
+    // RelaxedCopyToAtomic() can be reordered after the following modification.
+    lock_.store(orig_seq + 2, std::memory_order_release);
+  }
+
+  // Return the number of times that Write() has been called.
+  //
+  // REQUIRES: This must be externally synchronized against concurrent calls to
+  // `Write()` or `IncrementModificationCount()`.
+  // REQUIRES: `MarkInitialized()` must have been previously called.
+  int64_t ModificationCount() const {
+    int64_t val = lock_.load(std::memory_order_relaxed);
+    assert(val != kUninitialized && (val & 1) == 0);
+    return val / 2;
+  }
+
+  // REQUIRES: This must be externally synchronized against concurrent calls to
+  // `Write()` or `ModificationCount()`.
+  // REQUIRES: `MarkInitialized()` must have been previously called.
+  void IncrementModificationCount() {
+    int64_t val = lock_.load(std::memory_order_relaxed);
+    assert(val != kUninitialized);
+    lock_.store(val + 2, std::memory_order_relaxed);
+  }
+
+ private:
+  // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
+  // atomics.
+  static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
+                                    size_t size) {
+    char* dst_byte = static_cast<char*>(dst);
+    while (size >= sizeof(uint64_t)) {
+      uint64_t word = src->load(std::memory_order_relaxed);
+      std::memcpy(dst_byte, &word, sizeof(word));
+      dst_byte += sizeof(word);
+      src++;
+      size -= sizeof(word);
+    }
+    if (size > 0) {
+      uint64_t word = src->load(std::memory_order_relaxed);
+      std::memcpy(dst_byte, &word, size);
+    }
+  }
+
+  // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
+  // atomics.
+  static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
+                                  size_t size) {
+    const char* src_byte = static_cast<const char*>(src);
+    while (size >= sizeof(uint64_t)) {
+      uint64_t word;
+      std::memcpy(&word, src_byte, sizeof(word));
+      dst->store(word, std::memory_order_relaxed);
+      src_byte += sizeof(word);
+      dst++;
+      size -= sizeof(word);
+    }
+    if (size > 0) {
+      uint64_t word = 0;
+      std::memcpy(&word, src_byte, size);
+      dst->store(word, std::memory_order_relaxed);
+    }
+  }
+
+  static constexpr int64_t kUninitialized = -1;
+  std::atomic<int64_t> lock_;
+};
+
+}  // namespace flags_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
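A hedged usage sketch for the header above (the storage layout and call sites are illustrative; inside Abseil this is wired up within FlagImpl, and writers are serialized by the flag's data guard):

#include <atomic>
#include <cstddef>
#include <cstdint>

#include "absl/flags/internal/sequence_lock.h"

struct Payload {  // trivially copyable, larger than one word
  double a;
  std::int64_t b;
};

constexpr std::size_t kWords =
    absl::flags_internal::AlignUp(sizeof(Payload), sizeof(std::uint64_t)) /
    sizeof(std::uint64_t);

std::atomic<std::uint64_t> g_storage[kWords];
absl::flags_internal::SequenceLock g_lock;

void InitOnce() { g_lock.MarkInitialized(); }  // required before Publish/Snapshot

void Publish(const Payload& p) {  // callers must serialize writers externally
  g_lock.Write(g_storage, &p, sizeof(p));
}

bool Snapshot(Payload* out) {  // lock-free; false means "retry under a mutex"
  return g_lock.TryRead(out, g_storage, sizeof(*out));
}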
diff --git a/abseil-cpp/absl/flags/internal/sequence_lock_test.cc b/abseil-cpp/absl/flags/internal/sequence_lock_test.cc
new file mode 100644
index 0000000..c3ec372
--- /dev/null
+++ b/abseil-cpp/absl/flags/internal/sequence_lock_test.cc
@@ -0,0 +1,169 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/flags/internal/sequence_lock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <thread>  // NOLINT(build/c++11)
+#include <tuple>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/container/fixed_array.h"
+#include "absl/time/clock.h"
+
+namespace {
+
+namespace flags = absl::flags_internal;
+
+class ConcurrentSequenceLockTest
+    : public testing::TestWithParam<std::tuple<int, int>> {
+ public:
+  ConcurrentSequenceLockTest()
+      : buf_bytes_(std::get<0>(GetParam())),
+        num_threads_(std::get<1>(GetParam())) {}
+
+ protected:
+  const int buf_bytes_;
+  const int num_threads_;
+};
+
+TEST_P(ConcurrentSequenceLockTest, ReadAndWrite) {
+  const int buf_words =
+      flags::AlignUp(buf_bytes_, sizeof(uint64_t)) / sizeof(uint64_t);
+
+  // The buffer that will be protected by the SequenceLock.
+  absl::FixedArray<std::atomic<uint64_t>> protected_buf(buf_words);
+  for (auto& v : protected_buf) v = -1;
+
+  flags::SequenceLock seq_lock;
+  std::atomic<bool> stop{false};
+  std::atomic<int64_t> bad_reads{0};
+  std::atomic<int64_t> good_reads{0};
+  std::atomic<int64_t> unsuccessful_reads{0};
+
+  // Start a bunch of threads which read 'protected_buf' under the sequence
+  // lock. The main thread will concurrently update 'protected_buf'. The updates
+  // always consist of an array of identical integers. The reader ensures that
+  // any data it reads matches that pattern (i.e. the reads are not "torn").
+  std::vector<std::thread> threads;
+  for (int i = 0; i < num_threads_; i++) {
+    threads.emplace_back([&]() {
+      absl::FixedArray<char> local_buf(buf_bytes_);
+      while (!stop.load(std::memory_order_relaxed)) {
+        if (seq_lock.TryRead(local_buf.data(), protected_buf.data(),
+                             buf_bytes_)) {
+          bool good = true;
+          for (const auto& v : local_buf) {
+            if (v != local_buf[0]) good = false;
+          }
+          if (good) {
+            good_reads.fetch_add(1, std::memory_order_relaxed);
+          } else {
+            bad_reads.fetch_add(1, std::memory_order_relaxed);
+          }
+        } else {
+          unsuccessful_reads.fetch_add(1, std::memory_order_relaxed);
+        }
+      }
+    });
+  }
+  while (unsuccessful_reads.load(std::memory_order_relaxed) < num_threads_) {
+    absl::SleepFor(absl::Milliseconds(1));
+  }
+  seq_lock.MarkInitialized();
+
+  // Run a maximum of 5 seconds. On Windows, the scheduler behavior seems
+  // somewhat unfair and without an explicit timeout for this loop, the tests
+  // can run a long time.
+  absl::Time deadline = absl::Now() + absl::Seconds(5);
+  for (int i = 0; i < 100 && absl::Now() < deadline; i++) {
+    absl::FixedArray<char> writer_buf(buf_bytes_);
+    for (auto& v : writer_buf) v = i;
+    seq_lock.Write(protected_buf.data(), writer_buf.data(), buf_bytes_);
+    absl::SleepFor(absl::Microseconds(10));
+  }
+  stop.store(true, std::memory_order_relaxed);
+  for (auto& t : threads) t.join();
+  ASSERT_GE(good_reads, 0);
+  ASSERT_EQ(bad_reads, 0);
+}
+
+// Simple helper for generating a range of thread counts.
+// Generates [low, low*scale, low*scale^2, ..., high]; high is always included,
+// even if it falls between low*scale^k and low*scale^(k+1).
+std::vector<int> MultiplicativeRange(int low, int high, int scale) {
+  std::vector<int> result;
+  for (int current = low; current < high; current *= scale) {
+    result.push_back(current);
+  }
+  result.push_back(high);
+  return result;
+}
+
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+const int kMaxThreads = absl::base_internal::NumCPUs();
+#else
+// With TSAN, a lot of threads contending for atomic access on the sequence
+// lock make this test run too slowly.
+const int kMaxThreads = std::min(absl::base_internal::NumCPUs(), 4);
+#endif
+
+// Return all of the interesting buffer sizes worth testing:
+// powers of two and adjacent values.
+std::vector<int> InterestingBufferSizes() {
+  std::vector<int> ret;
+  for (int v : MultiplicativeRange(1, 128, 2)) {
+    ret.push_back(v);
+    if (v > 1) {
+      ret.push_back(v - 1);
+    }
+    ret.push_back(v + 1);
+  }
+  return ret;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    TestManyByteSizes, ConcurrentSequenceLockTest,
+    testing::Combine(
+        // Buffer size (bytes).
+        testing::ValuesIn(InterestingBufferSizes()),
+        // Number of reader threads.
+        testing::ValuesIn(MultiplicativeRange(1, kMaxThreads, 2))));
+
+// Simple single-threaded test, parameterized by the size of the buffer to be
+// protected.
+class SequenceLockTest : public testing::TestWithParam<int> {};
+
+TEST_P(SequenceLockTest, SingleThreaded) {
+  const int size = GetParam();
+  absl::FixedArray<std::atomic<uint64_t>> protected_buf(
+      flags::AlignUp(size, sizeof(uint64_t)) / sizeof(uint64_t));
+
+  flags::SequenceLock seq_lock;
+  seq_lock.MarkInitialized();
+
+  std::vector<char> src_buf(size, 'x');
+  seq_lock.Write(protected_buf.data(), src_buf.data(), size);
+
+  std::vector<char> dst_buf(size, '0');
+  ASSERT_TRUE(seq_lock.TryRead(dst_buf.data(), protected_buf.data(), size));
+  ASSERT_EQ(src_buf, dst_buf);
+}
+INSTANTIATE_TEST_SUITE_P(TestManyByteSizes, SequenceLockTest,
+                         // Buffer size (bytes).
+                         testing::Range(1, 128));
+
+}  // namespace
diff --git a/abseil-cpp/absl/flags/internal/usage.cc b/abseil-cpp/absl/flags/internal/usage.cc
index 0805df3..13852e1 100644
--- a/abseil-cpp/absl/flags/internal/usage.cc
+++ b/abseil-cpp/absl/flags/internal/usage.cc
@@ -17,7 +17,10 @@
 
 #include <stdint.h>
 
+#include <algorithm>
+#include <cstdlib>
 #include <functional>
+#include <iterator>
 #include <map>
 #include <ostream>
 #include <string>
@@ -33,30 +36,31 @@
 #include "absl/flags/internal/program_name.h"
 #include "absl/flags/internal/registry.h"
 #include "absl/flags/usage_config.h"
+#include "absl/strings/match.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
 #include "absl/strings/string_view.h"
 
-ABSL_FLAG(bool, help, false,
-          "show help on important flags for this binary [tip: all flags can "
-          "have two dashes]");
-ABSL_FLAG(bool, helpfull, false, "show help on all flags");
-ABSL_FLAG(bool, helpshort, false,
-          "show help on only the main module for this program");
-ABSL_FLAG(bool, helppackage, false,
-          "show help on all modules in the main package");
-ABSL_FLAG(bool, version, false, "show version and build info and exit");
-ABSL_FLAG(bool, only_check_args, false, "exit after checking all flags");
-ABSL_FLAG(std::string, helpon, "",
-          "show help on the modules named by this flag value");
-ABSL_FLAG(std::string, helpmatch, "",
-          "show help on modules whose name contains the specified substr");
+// Dummy global variables to prevent anyone else from defining these.
+bool FLAGS_help = false;
+bool FLAGS_helpfull = false;
+bool FLAGS_helpshort = false;
+bool FLAGS_helppackage = false;
+bool FLAGS_version = false;
+bool FLAGS_only_check_args = false;
+bool FLAGS_helpon = false;
+bool FLAGS_helpmatch = false;
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace flags_internal {
 namespace {
 
+using PerFlagFilter = std::function<bool(const absl::CommandLineFlag&)>;
+
+// Maximum line length in the human-readable help output format.
+constexpr size_t kHrfMaxLineLength = 80;
+
 // This class is used to emit an XML element with `tag` and `text`.
 // It adds opening and closing tags and escapes special characters in the text.
 // For example:
@@ -88,8 +92,16 @@
         case '>':
           out << "&gt;";
           break;
+        case '\n':
+        case '\v':
+        case '\f':
+        case '\t':
+          out << " ";
+          break;
         default:
-          out << c;
+          if (IsValidXmlCharacter(static_cast<unsigned char>(c))) {
+            out << c;
+          }
           break;
       }
     }
@@ -98,6 +110,7 @@
   }
 
  private:
+  static bool IsValidXmlCharacter(unsigned char c) { return c >= 0x20; }
   absl::string_view tag_;
   absl::string_view txt_;
 };
@@ -109,9 +122,12 @@
  public:
   // Pretty printer holds on to the std::ostream& reference to direct an output
   // to that stream.
-  FlagHelpPrettyPrinter(int max_line_len, std::ostream& out)
+  FlagHelpPrettyPrinter(size_t max_line_len, size_t min_line_len,
+                        size_t wrapped_line_indent, std::ostream& out)
       : out_(out),
         max_line_len_(max_line_len),
+        min_line_len_(min_line_len),
+        wrapped_line_indent_(wrapped_line_indent),
         line_len_(0),
         first_line_(true) {}
 
@@ -124,7 +140,7 @@
       for (auto line : absl::StrSplit(str, absl::ByAnyChar("\n\r"))) {
         if (!tokens.empty()) {
           // Keep line separators in the input string.
-          tokens.push_back("\n");
+          tokens.emplace_back("\n");
         }
         for (auto token :
              absl::StrSplit(line, absl::ByAnyChar(" \t"), absl::SkipEmpty())) {
@@ -164,13 +180,12 @@
 
   void StartLine() {
     if (first_line_) {
-      out_ << "    ";
-      line_len_ = 4;
+      line_len_ = min_line_len_;
       first_line_ = false;
     } else {
-      out_ << "      ";
-      line_len_ = 6;
+      line_len_ = min_line_len_ + wrapped_line_indent_;
     }
+    out_ << std::string(line_len_, ' ');
   }
   void EndLine() {
     out_ << '\n';
@@ -179,13 +194,15 @@
 
  private:
   std::ostream& out_;
-  const int max_line_len_;
-  int line_len_;
+  const size_t max_line_len_;
+  const size_t min_line_len_;
+  const size_t wrapped_line_indent_;
+  size_t line_len_;
   bool first_line_;
 };
 
 void FlagHelpHumanReadable(const CommandLineFlag& flag, std::ostream& out) {
-  FlagHelpPrettyPrinter printer(80, out);  // Max line length is 80.
+  FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 4, 2, out);
 
   // Flag name.
   printer.Write(absl::StrCat("--", flag.Name()));
@@ -221,7 +238,7 @@
 // If a flag's help message has been stripped (e.g. by adding '#define
 // STRIP_FLAG_HELP 1' then this flag will not be displayed by '--help'
 // and its variants.
-void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
+void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb,
                    HelpFormat format, absl::string_view program_usage_message) {
   if (format == HelpFormat::kHumanReadable) {
     out << flags_internal::ShortProgramInvocationName() << ": "
@@ -240,7 +257,7 @@
         << XMLElement("usage", program_usage_message) << '\n';
   }
 
-  // Map of package name to
+  // Ordered map of package name to
   //   map of file name to
   //     vector of flags in the file.
   // This map is used to output matching flags grouped by package and file
@@ -256,10 +273,10 @@
     // If the flag has been stripped, pretend that it doesn't exist.
     if (flag.Help() == flags_internal::kStrippedFlagHelp) return;
 
-    std::string flag_filename = flag.Filename();
-
     // Make sure flag satisfies the filter
-    if (!filter_cb || !filter_cb(flag_filename)) return;
+    if (!filter_cb(flag)) return;
+
+    std::string flag_filename = flag.Filename();
 
     matching_flags[std::string(flags_internal::Package(flag_filename))]
                   [flag_filename]
@@ -268,20 +285,26 @@
 
   absl::string_view package_separator;  // controls blank lines between packages
   absl::string_view file_separator;     // controls blank lines between files
-  for (const auto& package : matching_flags) {
+  for (auto& package : matching_flags) {
     if (format == HelpFormat::kHumanReadable) {
       out << package_separator;
       package_separator = "\n\n";
     }
 
     file_separator = "";
-    for (const auto& flags_in_file : package.second) {
+    for (auto& flags_in_file : package.second) {
       if (format == HelpFormat::kHumanReadable) {
         out << file_separator << "  Flags from " << flags_in_file.first
             << ":\n";
         file_separator = "\n";
       }
 
+      std::sort(std::begin(flags_in_file.second),
+                std::end(flags_in_file.second),
+                [](const CommandLineFlag* lhs, const CommandLineFlag* rhs) {
+                  return lhs->Name() < rhs->Name();
+                });
+
       for (const auto* flag : flags_in_file.second) {
         flags_internal::FlagHelp(out, *flag, format);
       }
@@ -289,15 +312,34 @@
   }
 
   if (format == HelpFormat::kHumanReadable) {
+    FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 0, 0, out);
+
     if (filter_cb && matching_flags.empty()) {
-      out << "  No modules matched: use -helpfull\n";
+      printer.Write("No flags matched.\n", true);
     }
+    printer.EndLine();
+    printer.Write(
+        "Try --helpfull to get a list of all flags or --help=substring "
+        "shows help for flags which include specified substring in either "
+        "in the name, or description or path.\n",
+        true);
   } else {
     // The end of the document.
     out << "</AllFlags>\n";
   }
 }
 
+void FlagsHelpImpl(std::ostream& out,
+                   flags_internal::FlagKindFilter filename_filter_cb,
+                   HelpFormat format, absl::string_view program_usage_message) {
+  FlagsHelpImpl(
+      out,
+      [&](const absl::CommandLineFlag& flag) {
+        return filename_filter_cb && filename_filter_cb(flag.Filename());
+      },
+      format, program_usage_message);
+}
+
 }  // namespace
 
 // --------------------------------------------------------------------
@@ -309,12 +351,12 @@
 }
 
 // --------------------------------------------------------------------
-// Produces the help messages for all flags matching the filter.
+// Produces the help messages for all flags matching the filename filter.
 // If filter is empty produces help messages for all flags.
 void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format,
                absl::string_view program_usage_message) {
   flags_internal::FlagKindFilter filter_cb = [&](absl::string_view filename) {
-    return filter.empty() || filename.find(filter) != absl::string_view::npos;
+    return filter.empty() || absl::StrContains(filename, filter);
   };
   flags_internal::FlagsHelpImpl(out, filter_cb, format, program_usage_message);
 }
@@ -322,70 +364,187 @@
 // --------------------------------------------------------------------
 // Checks all the 'usage' command line flags to see if any have been set.
 // If so, handles them appropriately.
-int HandleUsageFlags(std::ostream& out,
-                     absl::string_view program_usage_message) {
-  if (absl::GetFlag(FLAGS_helpshort)) {
-    flags_internal::FlagsHelpImpl(
-        out, flags_internal::GetUsageConfig().contains_helpshort_flags,
-        HelpFormat::kHumanReadable, program_usage_message);
-    return 1;
+HelpMode HandleUsageFlags(std::ostream& out,
+                          absl::string_view program_usage_message) {
+  switch (GetFlagsHelpMode()) {
+    case HelpMode::kNone:
+      break;
+    case HelpMode::kImportant:
+      flags_internal::FlagsHelpImpl(
+          out, flags_internal::GetUsageConfig().contains_help_flags,
+          GetFlagsHelpFormat(), program_usage_message);
+      break;
+
+    case HelpMode::kShort:
+      flags_internal::FlagsHelpImpl(
+          out, flags_internal::GetUsageConfig().contains_helpshort_flags,
+          GetFlagsHelpFormat(), program_usage_message);
+      break;
+
+    case HelpMode::kFull:
+      flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(),
+                                program_usage_message);
+      break;
+
+    case HelpMode::kPackage:
+      flags_internal::FlagsHelpImpl(
+          out, flags_internal::GetUsageConfig().contains_helppackage_flags,
+          GetFlagsHelpFormat(), program_usage_message);
+      break;
+
+    case HelpMode::kMatch: {
+      std::string substr = GetFlagsHelpMatchSubstr();
+      if (substr.empty()) {
+        // show all options
+        flags_internal::FlagsHelp(out, substr, GetFlagsHelpFormat(),
+                                  program_usage_message);
+      } else {
+        auto filter_cb = [&substr](const absl::CommandLineFlag& flag) {
+          if (absl::StrContains(flag.Name(), substr)) return true;
+          if (absl::StrContains(flag.Filename(), substr)) return true;
+          if (absl::StrContains(flag.Help(), substr)) return true;
+
+          return false;
+        };
+        flags_internal::FlagsHelpImpl(
+            out, filter_cb, HelpFormat::kHumanReadable, program_usage_message);
+      }
+      break;
+    }
+    case HelpMode::kVersion:
+      if (flags_internal::GetUsageConfig().version_string)
+        out << flags_internal::GetUsageConfig().version_string();
+      // Unlike help, we may be asking for version in a script, so return 0
+      break;
+
+    case HelpMode::kOnlyCheckArgs:
+      break;
   }
 
-  if (absl::GetFlag(FLAGS_helpfull)) {
-    // show all options
-    flags_internal::FlagsHelp(out, "", HelpFormat::kHumanReadable,
-                              program_usage_message);
-    return 1;
-  }
-
-  if (!absl::GetFlag(FLAGS_helpon).empty()) {
-    flags_internal::FlagsHelp(
-        out, absl::StrCat("/", absl::GetFlag(FLAGS_helpon), "."),
-        HelpFormat::kHumanReadable, program_usage_message);
-    return 1;
-  }
-
-  if (!absl::GetFlag(FLAGS_helpmatch).empty()) {
-    flags_internal::FlagsHelp(out, absl::GetFlag(FLAGS_helpmatch),
-                              HelpFormat::kHumanReadable,
-                              program_usage_message);
-    return 1;
-  }
-
-  if (absl::GetFlag(FLAGS_help)) {
-    flags_internal::FlagsHelpImpl(
-        out, flags_internal::GetUsageConfig().contains_help_flags,
-        HelpFormat::kHumanReadable, program_usage_message);
-
-    out << "\nTry --helpfull to get a list of all flags.\n";
-
-    return 1;
-  }
-
-  if (absl::GetFlag(FLAGS_helppackage)) {
-    flags_internal::FlagsHelpImpl(
-        out, flags_internal::GetUsageConfig().contains_helppackage_flags,
-        HelpFormat::kHumanReadable, program_usage_message);
-
-    out << "\nTry --helpfull to get a list of all flags.\n";
-
-    return 1;
-  }
-
-  if (absl::GetFlag(FLAGS_version)) {
-    if (flags_internal::GetUsageConfig().version_string)
-      out << flags_internal::GetUsageConfig().version_string();
-    // Unlike help, we may be asking for version in a script, so return 0
-    return 0;
-  }
-
-  if (absl::GetFlag(FLAGS_only_check_args)) {
-    return 0;
-  }
-
-  return -1;
+  return GetFlagsHelpMode();
 }
 
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
+
+namespace {
+
+ABSL_CONST_INIT absl::Mutex help_attributes_guard(absl::kConstInit);
+ABSL_CONST_INIT std::string* match_substr
+    ABSL_GUARDED_BY(help_attributes_guard) = nullptr;
+ABSL_CONST_INIT HelpMode help_mode ABSL_GUARDED_BY(help_attributes_guard) =
+    HelpMode::kNone;
+ABSL_CONST_INIT HelpFormat help_format ABSL_GUARDED_BY(help_attributes_guard) =
+    HelpFormat::kHumanReadable;
+
+}  // namespace
+
+std::string GetFlagsHelpMatchSubstr() {
+  absl::MutexLock l(&help_attributes_guard);
+  if (match_substr == nullptr) return "";
+  return *match_substr;
+}
+
+void SetFlagsHelpMatchSubstr(absl::string_view substr) {
+  absl::MutexLock l(&help_attributes_guard);
+  if (match_substr == nullptr) match_substr = new std::string;
+  match_substr->assign(substr.data(), substr.size());
+}
+
+HelpMode GetFlagsHelpMode() {
+  absl::MutexLock l(&help_attributes_guard);
+  return help_mode;
+}
+
+void SetFlagsHelpMode(HelpMode mode) {
+  absl::MutexLock l(&help_attributes_guard);
+  help_mode = mode;
+}
+
+HelpFormat GetFlagsHelpFormat() {
+  absl::MutexLock l(&help_attributes_guard);
+  return help_format;
+}
+
+void SetFlagsHelpFormat(HelpFormat format) {
+  absl::MutexLock l(&help_attributes_guard);
+  help_format = format;
+}
+
+// Deduces usage flags from the input argument in the form --name=value or
+// --name. The argument is already split into name and value before this
+// function is called.
+bool DeduceUsageFlags(absl::string_view name, absl::string_view value) {
+  if (absl::ConsumePrefix(&name, "help")) {
+    if (name.empty()) {
+      if (value.empty()) {
+        SetFlagsHelpMode(HelpMode::kImportant);
+      } else {
+        SetFlagsHelpMode(HelpMode::kMatch);
+        SetFlagsHelpMatchSubstr(value);
+      }
+      return true;
+    }
+
+    if (name == "match") {
+      SetFlagsHelpMode(HelpMode::kMatch);
+      SetFlagsHelpMatchSubstr(value);
+      return true;
+    }
+
+    if (name == "on") {
+      SetFlagsHelpMode(HelpMode::kMatch);
+      SetFlagsHelpMatchSubstr(absl::StrCat("/", value, "."));
+      return true;
+    }
+
+    if (name == "full") {
+      SetFlagsHelpMode(HelpMode::kFull);
+      return true;
+    }
+
+    if (name == "short") {
+      SetFlagsHelpMode(HelpMode::kShort);
+      return true;
+    }
+
+    if (name == "package") {
+      SetFlagsHelpMode(HelpMode::kPackage);
+      return true;
+    }
+
+    return false;
+  }
+
+  if (name == "version") {
+    SetFlagsHelpMode(HelpMode::kVersion);
+    return true;
+  }
+
+  if (name == "only_check_args") {
+    SetFlagsHelpMode(HelpMode::kOnlyCheckArgs);
+    return true;
+  }
+
+  return false;
+}
+
+// --------------------------------------------------------------------
+
+void MaybeExit(HelpMode mode) {
+  switch (mode) {
+    case flags_internal::HelpMode::kNone:
+      return;
+    case flags_internal::HelpMode::kOnlyCheckArgs:
+    case flags_internal::HelpMode::kVersion:
+      std::exit(0);
+    default:  // For all the other modes we exit with 1
+      std::exit(1);
+  }
+}
+
+// --------------------------------------------------------------------
+
 }  // namespace flags_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
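An illustrative walkthrough of the DeduceUsageFlags() behavior added above (not part of any test; it assumes the usage globals start in their default kNone state, and it only uses the getters/setters declared in usage.h):

#include <cassert>

#include "absl/flags/internal/usage.h"

namespace fi = absl::flags_internal;

void UsageFlagExamples() {
  fi::DeduceUsageFlags("help", "");        // --help         -> kImportant
  assert(fi::GetFlagsHelpMode() == fi::HelpMode::kImportant);

  fi::DeduceUsageFlags("help", "file");    // --help=file    -> kMatch on "file"
  assert(fi::GetFlagsHelpMode() == fi::HelpMode::kMatch);
  assert(fi::GetFlagsHelpMatchSubstr() == "file");

  fi::DeduceUsageFlags("helpon", "tool");  // --helpon=tool  -> kMatch on "/tool."
  assert(fi::GetFlagsHelpMatchSubstr() == "/tool.");

  fi::DeduceUsageFlags("version", "");     // --version      -> kVersion
  assert(fi::GetFlagsHelpMode() == fi::HelpMode::kVersion);
}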
diff --git a/abseil-cpp/absl/flags/internal/usage.h b/abseil-cpp/absl/flags/internal/usage.h
index 0c62dc4..a96cbf3 100644
--- a/abseil-cpp/absl/flags/internal/usage.h
+++ b/abseil-cpp/absl/flags/internal/usage.h
@@ -17,11 +17,11 @@
 #define ABSL_FLAGS_INTERNAL_USAGE_H_
 
 #include <iosfwd>
+#include <ostream>
 #include <string>
 
 #include "absl/base/config.h"
 #include "absl/flags/commandlineflag.h"
-#include "absl/flags/declare.h"
 #include "absl/strings/string_view.h"
 
 // --------------------------------------------------------------------
@@ -36,7 +36,20 @@
   kHumanReadable,
 };
 
-// Outputs the help message describing specific flag.
+// The kind of usage help requested.
+enum class HelpMode {
+  kNone,
+  kImportant,
+  kShort,
+  kFull,
+  kPackage,
+  kMatch,
+  kVersion,
+  kOnlyCheckArgs
+};
+
+// Streams the help message describing `flag` to `out`.
+// The default value for `flag` is included in the output.
 void FlagHelp(std::ostream& out, const CommandLineFlag& flag,
               HelpFormat format = HelpFormat::kHumanReadable);
 
@@ -56,26 +69,38 @@
 
 // If any of the 'usage' related command line flags (listed on the bottom of
 // this file) has been set this routine produces corresponding help message in
-// the specified output stream and returns:
-//  0 - if "version" or "only_check_flags" flags were set and handled.
-//  1 - if some other 'usage' related flag was set and handled.
-// -1 - if no usage flags were set on a commmand line.
-// Non negative return values are expected to be used as an exit code for a
-// binary.
-int HandleUsageFlags(std::ostream& out,
-                     absl::string_view program_usage_message);
+// the specified output stream and returns HelpMode that was handled. Otherwise
+// it returns HelpMode::kNone.
+HelpMode HandleUsageFlags(std::ostream& out,
+                          absl::string_view program_usage_message);
+
+// --------------------------------------------------------------------
+// Encapsulates the logic of exiting the binary depending on handled help mode.
+
+void MaybeExit(HelpMode mode);
+
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
+
+// Returns substring to filter help output (--help=substr argument)
+std::string GetFlagsHelpMatchSubstr();
+// Returns the requested help mode.
+HelpMode GetFlagsHelpMode();
+// Returns the requested help format.
+HelpFormat GetFlagsHelpFormat();
+
+// These are corresponding setters to the attributes above.
+void SetFlagsHelpMatchSubstr(absl::string_view);
+void SetFlagsHelpMode(HelpMode);
+void SetFlagsHelpFormat(HelpFormat);
+
+// Deduces usage flags from the input argument in the form --name=value or
+// --name. The argument is already split into name and value before this
+// function is called.
+bool DeduceUsageFlags(absl::string_view name, absl::string_view value);
 
 }  // namespace flags_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-ABSL_DECLARE_FLAG(bool, help);
-ABSL_DECLARE_FLAG(bool, helpfull);
-ABSL_DECLARE_FLAG(bool, helpshort);
-ABSL_DECLARE_FLAG(bool, helppackage);
-ABSL_DECLARE_FLAG(bool, version);
-ABSL_DECLARE_FLAG(bool, only_check_args);
-ABSL_DECLARE_FLAG(std::string, helpon);
-ABSL_DECLARE_FLAG(std::string, helpmatch);
-
 #endif  // ABSL_FLAGS_INTERNAL_USAGE_H_
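A sketch of the call pattern the new return type is designed for: handle the usage flags, then let MaybeExit() decide whether and how to exit. (Illustrative only; the real call site lives in the flags parser, not shown here.)

#include <iostream>

#include "absl/flags/internal/usage.h"
#include "absl/strings/string_view.h"

void AfterParsing(absl::string_view program_usage_message) {
  const absl::flags_internal::HelpMode help_mode =
      absl::flags_internal::HandleUsageFlags(std::cout, program_usage_message);
  // kNone returns; kVersion/kOnlyCheckArgs exit with 0; all other modes exit 1.
  absl::flags_internal::MaybeExit(help_mode);
}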
diff --git a/abseil-cpp/absl/flags/internal/usage_test.cc b/abseil-cpp/absl/flags/internal/usage_test.cc
index 6e583fb..6847386 100644
--- a/abseil-cpp/absl/flags/internal/usage_test.cc
+++ b/abseil-cpp/absl/flags/internal/usage_test.cc
@@ -20,10 +20,10 @@
 #include <sstream>
 #include <string>
 
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/flags/flag.h"
 #include "absl/flags/internal/parse.h"
-#include "absl/flags/internal/path_util.h"
 #include "absl/flags/internal/program_name.h"
 #include "absl/flags/reflection.h"
 #include "absl/flags/usage.h"
@@ -39,15 +39,20 @@
           "usage_reporting_test_flag_03 help message");
 ABSL_FLAG(int64_t, usage_reporting_test_flag_04, 1000000000000004L,
           "usage_reporting_test_flag_04 help message");
+ABSL_FLAG(std::string, usage_reporting_test_flag_07, "\r\n\f\v\a\b\t ",
+          "usage_reporting_test_flag_07 help \r\n\f\v\a\b\t ");
 
 static const char kTestUsageMessage[] = "Custom usage message";
 
 struct UDT {
   UDT() = default;
   UDT(const UDT&) = default;
+  UDT& operator=(const UDT&) = default;
 };
-bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
-std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
+static bool AbslParseFlag(absl::string_view, UDT*, std::string*) {
+  return true;
+}
+static std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
 
 ABSL_FLAG(UDT, usage_reporting_test_flag_05, {},
           "usage_reporting_test_flag_05 help message");
@@ -87,6 +92,11 @@
     default_config.normalize_filename = &NormalizeFileName;
     absl::SetFlagsUsageConfig(default_config);
   }
+  ~UsageReportingTest() override {
+    flags::SetFlagsHelpMode(flags::HelpMode::kNone);
+    flags::SetFlagsHelpMatchSubstr("");
+    flags::SetFlagsHelpFormat(flags::HelpFormat::kHumanReadable);
+  }
 
  private:
   absl::FlagSaver flag_saver_;
@@ -97,14 +107,19 @@
 using UsageReportingDeathTest = UsageReportingTest;
 
 TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) {
+#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL
+  // Check for kTestUsageMessage set in main() below.
   EXPECT_EQ(absl::ProgramUsageMessage(), kTestUsageMessage);
+#else
+  // Check for part of the usage message set by GoogleTest.
+  EXPECT_THAT(absl::ProgramUsageMessage(),
+              ::testing::HasSubstr(
+                  "This program contains tests written using Google Test"));
+#endif
 
-#ifndef _WIN32
-  // TODO(rogeeff): figure out why this does not work on Windows.
   EXPECT_DEATH_IF_SUPPORTED(
       absl::SetProgramUsageMessage("custom usage message"),
-      ".*SetProgramUsageMessage\\(\\) called twice.*");
-#endif
+      ::testing::HasSubstr("SetProgramUsageMessage() called twice"));
 }
 
 // --------------------------------------------------------------------
@@ -190,7 +205,15 @@
 
       Some more help.
       Even more long long long long long long long long long long long long help
-      message.); default: "";
+      message.); default: "";)"
+
+      "\n    --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+      "help\n\n      \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+      R"(
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )";
 
   std::stringstream test_buf_01;
@@ -214,7 +237,11 @@
   EXPECT_EQ(test_buf_04.str(),
             R"(usage_test: Custom usage message
 
-  No modules matched: use -helpfull
+No flags matched.
+
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )");
 
   std::stringstream test_buf_05;
@@ -226,30 +253,29 @@
       absl::StartsWith(test_out_str, "usage_test: Custom usage message"));
   EXPECT_TRUE(absl::StrContains(
       test_out_str, "Flags from absl/flags/internal/usage_test.cc:"));
-  EXPECT_TRUE(absl::StrContains(test_out_str,
-                                "Flags from absl/flags/internal/usage.cc:"));
   EXPECT_TRUE(
       absl::StrContains(test_out_str, "-usage_reporting_test_flag_01 "));
-  EXPECT_TRUE(absl::StrContains(test_out_str, "-help (show help"))
-      << test_out_str;
 }
 
 // --------------------------------------------------------------------
 
 TEST_F(UsageReportingTest, TestNoUsageFlags) {
   std::stringstream test_buf;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), -1);
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kNone);
 }
 
 // --------------------------------------------------------------------
 
 TEST_F(UsageReportingTest, TestUsageFlag_helpshort) {
-  absl::SetFlag(&FLAGS_helpshort, true);
+  flags::SetFlagsHelpMode(flags::HelpMode::kShort);
 
   std::stringstream test_buf;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
-  EXPECT_EQ(test_buf.str(),
-            R"(usage_test: Custom usage message
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kShort);
+  EXPECT_EQ(
+      test_buf.str(),
+      R"(usage_test: Custom usage message
 
   Flags from absl/flags/internal/usage_test.cc:
     --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -266,19 +292,29 @@
 
       Some more help.
       Even more long long long long long long long long long long long long help
-      message.); default: "";
+      message.); default: "";)"
+
+      "\n    --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+      "help\n\n      \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+      R"(
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )");
 }
 
 // --------------------------------------------------------------------
 
-TEST_F(UsageReportingTest, TestUsageFlag_help) {
-  absl::SetFlag(&FLAGS_help, true);
+TEST_F(UsageReportingTest, TestUsageFlag_help_simple) {
+  flags::SetFlagsHelpMode(flags::HelpMode::kImportant);
 
   std::stringstream test_buf;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
-  EXPECT_EQ(test_buf.str(),
-            R"(usage_test: Custom usage message
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kImportant);
+  EXPECT_EQ(
+      test_buf.str(),
+      R"(usage_test: Custom usage message
 
   Flags from absl/flags/internal/usage_test.cc:
     --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -295,21 +331,94 @@
 
       Some more help.
       Even more long long long long long long long long long long long long help
+      message.); default: "";)"
+
+      "\n    --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+      "help\n\n      \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+      R"(
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
+)");
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(UsageReportingTest, TestUsageFlag_help_one_flag) {
+  flags::SetFlagsHelpMode(flags::HelpMode::kMatch);
+  flags::SetFlagsHelpMatchSubstr("usage_reporting_test_flag_06");
+
+  std::stringstream test_buf;
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kMatch);
+  EXPECT_EQ(test_buf.str(),
+            R"(usage_test: Custom usage message
+
+  Flags from absl/flags/internal/usage_test.cc:
+    --usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
+
+      Some more help.
+      Even more long long long long long long long long long long long long help
       message.); default: "";
 
-Try --helpfull to get a list of all flags.
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
+)");
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(UsageReportingTest, TestUsageFlag_help_multiple_flag) {
+  flags::SetFlagsHelpMode(flags::HelpMode::kMatch);
+  flags::SetFlagsHelpMatchSubstr("test_flag");
+
+  std::stringstream test_buf;
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kMatch);
+  EXPECT_EQ(
+      test_buf.str(),
+      R"(usage_test: Custom usage message
+
+  Flags from absl/flags/internal/usage_test.cc:
+    --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
+      default: 101;
+    --usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
+      default: false;
+    --usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
+      default: 1.03;
+    --usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
+      default: 1000000000000004;
+    --usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
+      default: UDT{};
+    --usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
+
+      Some more help.
+      Even more long long long long long long long long long long long long help
+      message.); default: "";)"
+
+      "\n    --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+      "help\n\n      \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+      R"(
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )");
 }
 
 // --------------------------------------------------------------------
 
 TEST_F(UsageReportingTest, TestUsageFlag_helppackage) {
-  absl::SetFlag(&FLAGS_helppackage, true);
+  flags::SetFlagsHelpMode(flags::HelpMode::kPackage);
 
   std::stringstream test_buf;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
-  EXPECT_EQ(test_buf.str(),
-            R"(usage_test: Custom usage message
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kPackage);
+  EXPECT_EQ(
+      test_buf.str(),
+      R"(usage_test: Custom usage message
 
   Flags from absl/flags/internal/usage_test.cc:
     --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -326,19 +435,26 @@
 
       Some more help.
       Even more long long long long long long long long long long long long help
-      message.); default: "";
+      message.); default: "";)"
 
-Try --helpfull to get a list of all flags.
+      "\n    --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+      "help\n\n      \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+      R"(
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )");
 }
 
 // --------------------------------------------------------------------
 
 TEST_F(UsageReportingTest, TestUsageFlag_version) {
-  absl::SetFlag(&FLAGS_version, true);
+  flags::SetFlagsHelpMode(flags::HelpMode::kVersion);
 
   std::stringstream test_buf;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 0);
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kVersion);
 #ifndef NDEBUG
   EXPECT_EQ(test_buf.str(), "usage_test\nDebug build (NDEBUG not #defined)\n");
 #else
@@ -349,32 +465,41 @@
 // --------------------------------------------------------------------
 
 TEST_F(UsageReportingTest, TestUsageFlag_only_check_args) {
-  absl::SetFlag(&FLAGS_only_check_args, true);
+  flags::SetFlagsHelpMode(flags::HelpMode::kOnlyCheckArgs);
 
   std::stringstream test_buf;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 0);
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+            flags::HelpMode::kOnlyCheckArgs);
   EXPECT_EQ(test_buf.str(), "");
 }
 
 // --------------------------------------------------------------------
 
 TEST_F(UsageReportingTest, TestUsageFlag_helpon) {
-  absl::SetFlag(&FLAGS_helpon, "bla-bla");
+  flags::SetFlagsHelpMode(flags::HelpMode::kMatch);
+  flags::SetFlagsHelpMatchSubstr("/bla-bla.");
 
   std::stringstream test_buf_01;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf_01, kTestUsageMessage), 1);
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf_01, kTestUsageMessage),
+            flags::HelpMode::kMatch);
   EXPECT_EQ(test_buf_01.str(),
             R"(usage_test: Custom usage message
 
-  No modules matched: use -helpfull
+No flags matched.
+
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )");
 
-  absl::SetFlag(&FLAGS_helpon, "usage_test");
+  flags::SetFlagsHelpMatchSubstr("/usage_test.");
 
   std::stringstream test_buf_02;
-  EXPECT_EQ(flags::HandleUsageFlags(test_buf_02, kTestUsageMessage), 1);
-  EXPECT_EQ(test_buf_02.str(),
-            R"(usage_test: Custom usage message
+  EXPECT_EQ(flags::HandleUsageFlags(test_buf_02, kTestUsageMessage),
+            flags::HelpMode::kMatch);
+  EXPECT_EQ(
+      test_buf_02.str(),
+      R"(usage_test: Custom usage message
 
   Flags from absl/flags/internal/usage_test.cc:
     --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -391,7 +516,15 @@
 
       Some more help.
       Even more long long long long long long long long long long long long help
-      message.); default: "";
+      message.); default: "";)"
+
+      "\n    --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+      "help\n\n      \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+      R"(
+Try --helpfull to get a list of all flags or --help=substring shows help for
+flags which include specified substring in either in the name, or description or
+path.
 )");
 }
 
@@ -402,8 +535,10 @@
 int main(int argc, char* argv[]) {
   (void)absl::GetFlag(FLAGS_undefok);  // Force linking of parse.cc
   flags::SetProgramInvocationName("usage_test");
+#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL
+  // GoogleTest calls absl::SetProgramUsageMessage() already.
   absl::SetProgramUsageMessage(kTestUsageMessage);
+#endif
   ::testing::InitGoogleTest(&argc, argv);
-
   return RUN_ALL_TESTS();
 }
diff --git a/abseil-cpp/absl/flags/marshalling.cc b/abseil-cpp/absl/flags/marshalling.cc
index 81f9ceb..dc69754 100644
--- a/abseil-cpp/absl/flags/marshalling.cc
+++ b/abseil-cpp/absl/flags/marshalling.cc
@@ -19,6 +19,7 @@
 
 #include <cmath>
 #include <limits>
+#include <sstream>
 #include <string>
 #include <type_traits>
 #include <vector>
@@ -26,6 +27,7 @@
 #include "absl/base/config.h"
 #include "absl/base/log_severity.h"
 #include "absl/base/macros.h"
+#include "absl/numeric/int128.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/match.h"
 #include "absl/strings/numbers.h"
@@ -68,8 +70,10 @@
 // puts us in base 16.  But leading 0 does not put us in base 8. It
 // caused too many bugs when we had that behavior.
 static int NumericBase(absl::string_view text) {
-  const bool hex = (text.size() >= 2 && text[0] == '0' &&
-                    (text[1] == 'x' || text[1] == 'X'));
+  if (text.empty()) return 0;
+  size_t num_start = (text[0] == '-' || text[0] == '+') ? 1 : 0;
+  const bool hex = (text.size() >= num_start + 2 && text[num_start] == '0' &&
+                    (text[num_start + 1] == 'x' || text[num_start + 1] == 'X'));
   return hex ? 16 : 10;
 }
 
@@ -125,6 +129,32 @@
   return ParseFlagImpl(text, *dst);
 }
 
+bool AbslParseFlag(absl::string_view text, absl::int128* dst, std::string*) {
+  text = absl::StripAsciiWhitespace(text);
+
+  // check hex
+  int base = NumericBase(text);
+  if (!absl::numbers_internal::safe_strto128_base(text, dst, base)) {
+    return false;
+  }
+
+  return base == 16 ? absl::SimpleHexAtoi(text, dst)
+                    : absl::SimpleAtoi(text, dst);
+}
+
+bool AbslParseFlag(absl::string_view text, absl::uint128* dst, std::string*) {
+  text = absl::StripAsciiWhitespace(text);
+
+  // check hex
+  int base = NumericBase(text);
+  if (!absl::numbers_internal::safe_strtou128_base(text, dst, base)) {
+    return false;
+  }
+
+  return base == 16 ? absl::SimpleHexAtoi(text, dst)
+                    : absl::SimpleAtoi(text, dst);
+}
+
 // --------------------------------------------------------------------
 // AbslParseFlag for floating point types.
 
@@ -171,6 +201,17 @@
 std::string Unparse(unsigned long v) { return absl::StrCat(v); }
 std::string Unparse(long long v) { return absl::StrCat(v); }
 std::string Unparse(unsigned long long v) { return absl::StrCat(v); }
+std::string Unparse(absl::int128 v) {
+  std::stringstream ss;
+  ss << v;
+  return ss.str();
+}
+std::string Unparse(absl::uint128 v) {
+  std::stringstream ss;
+  ss << v;
+  return ss.str();
+}
+
 template <typename T>
 std::string UnparseFloatingPointVal(T v) {
   // digits10 is guaranteed to roundtrip correctly in string -> value -> string
diff --git a/abseil-cpp/absl/flags/marshalling.h b/abseil-cpp/absl/flags/marshalling.h
index 0b50335..301213a 100644
--- a/abseil-cpp/absl/flags/marshalling.h
+++ b/abseil-cpp/absl/flags/marshalling.h
@@ -33,6 +33,7 @@
 // * `double`
 // * `std::string`
 // * `std::vector<std::string>`
+// * `std::optional<T>`
 // * `absl::LogSeverity` (provided natively for layering reasons)
 //
 // Note that support for integral types is implemented using overloads for
@@ -65,6 +66,42 @@
 // below.)
 //
 // -----------------------------------------------------------------------------
+// Optional Flags
+// -----------------------------------------------------------------------------
+//
+// The Abseil flags library supports flags of type `std::optional<T>` where
+// `T` is a type of one of the supported flags. We refer to this flag type as
+// an "optional flag." An optional flag is either "valueless", holding no value
+// of type `T` (indicating that the flag has not been set) or a value of type
+// `T`. The valueless state in C++ code is represented by a value of
+// `std::nullopt` for the optional flag.
+//
+// Using `std::nullopt` as an optional flag's default value allows you to check
+// whether such a flag was ever specified on the command line:
+//
+//   if (absl::GetFlag(FLAGS_foo).has_value()) {
+//     // flag was set on command line
+//   } else {
+//     // flag was not passed on command line
+//   }
+//
+// Using an optional flag in this manner avoids common workarounds for
+// indicating such an unset flag (such as using sentinel values to indicate this
+// state).
+//
+// An optional flag also allows a developer to pass a flag in an "unset"
+// valueless state on the command line, allowing the flag to later be set in
+// binary logic. An optional flag's valueless state is indicated by the special
+// notation of passing the value as an empty string through the syntax `--flag=`
+// or `--flag ""`.
+//
+//   $ binary_with_optional --flag_in_unset_state=
+//   $ binary_with_optional --flag_in_unset_state ""
+//
+// Note: as a result of the above syntax requirements, an optional flag cannot
+// be set to a `T` of any value which unparses to the empty string.
+//
+// -----------------------------------------------------------------------------
 // Adding Type Support for Abseil Flags
 // -----------------------------------------------------------------------------
 //
@@ -83,7 +120,7 @@
 //   // AbslParseFlag converts from a string to OutputMode.
 //   // Must be in same namespace as OutputMode.
 //
-//   // Parses an OutputMode from the command line flag value `text. Returns
+//   // Parses an OutputMode from the command line flag value `text`. Returns
 //   // `true` and sets `*mode` on success; returns `false` and sets `*error`
 //   // on failure.
 //   bool AbslParseFlag(absl::string_view text,
@@ -139,7 +176,7 @@
 //
 //   // Within the implementation, `AbslParseFlag()` will, in turn invoke
 //   // `absl::ParseFlag()` on its constituent `int` and `std::string` types
-//   // (which have built-in Abseil flag support.
+//   // (which have built-in Abseil flag support).
 //
 //   bool AbslParseFlag(absl::string_view text, MyFlagType* flag,
 //                      std::string* err) {
@@ -162,14 +199,28 @@
 #ifndef ABSL_FLAGS_MARSHALLING_H_
 #define ABSL_FLAGS_MARSHALLING_H_
 
+#include "absl/base/config.h"
+#include "absl/numeric/int128.h"
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+#include <optional>
+#endif
 #include <string>
 #include <vector>
 
-#include "absl/base/config.h"
 #include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
+
+// Forward declaration to be used inside composable flag parse/unparse
+// implementations
+template <typename T>
+inline bool ParseFlag(absl::string_view input, T* dst, std::string* error);
+template <typename T>
+inline std::string UnparseFlag(const T& v);
+
 namespace flags_internal {
 
 // Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types.
@@ -183,12 +234,44 @@
 bool AbslParseFlag(absl::string_view, long long*, std::string*);       // NOLINT
 bool AbslParseFlag(absl::string_view, unsigned long long*,             // NOLINT
                    std::string*);
+bool AbslParseFlag(absl::string_view, absl::int128*, std::string*);    // NOLINT
+bool AbslParseFlag(absl::string_view, absl::uint128*, std::string*);   // NOLINT
 bool AbslParseFlag(absl::string_view, float*, std::string*);
 bool AbslParseFlag(absl::string_view, double*, std::string*);
 bool AbslParseFlag(absl::string_view, std::string*, std::string*);
 bool AbslParseFlag(absl::string_view, std::vector<std::string>*, std::string*);
 
 template <typename T>
+bool AbslParseFlag(absl::string_view text, absl::optional<T>* f,
+                   std::string* err) {
+  if (text.empty()) {
+    *f = absl::nullopt;
+    return true;
+  }
+  T value;
+  if (!absl::ParseFlag(text, &value, err)) return false;
+
+  *f = std::move(value);
+  return true;
+}
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+template <typename T>
+bool AbslParseFlag(absl::string_view text, std::optional<T>* f,
+                   std::string* err) {
+  if (text.empty()) {
+    *f = std::nullopt;
+    return true;
+  }
+  T value;
+  if (!absl::ParseFlag(text, &value, err)) return false;
+
+  *f = std::move(value);
+  return true;
+}
+#endif
+
+template <typename T>
 bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) {
   // Comment on next line provides a good compiler error message if T
   // does not have AbslParseFlag(absl::string_view, T*, std::string*).
@@ -202,6 +285,18 @@
 std::string AbslUnparseFlag(const std::vector<std::string>&);
 
 template <typename T>
+std::string AbslUnparseFlag(const absl::optional<T>& f) {
+  return f.has_value() ? absl::UnparseFlag(*f) : "";
+}
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+template <typename T>
+std::string AbslUnparseFlag(const std::optional<T>& f) {
+  return f.has_value() ? absl::UnparseFlag(*f) : "";
+}
+#endif
+
+template <typename T>
 std::string Unparse(const T& v) {
   // Comment on next line provides a good compiler error message if T does not
   // have UnparseFlag.
@@ -218,6 +313,8 @@
 std::string Unparse(unsigned long v);       // NOLINT
 std::string Unparse(long long v);           // NOLINT
 std::string Unparse(unsigned long long v);  // NOLINT
+std::string Unparse(absl::int128 v);
+std::string Unparse(absl::uint128 v);
 std::string Unparse(float v);
 std::string Unparse(double v);
 
diff --git a/abseil-cpp/absl/flags/marshalling_test.cc b/abseil-cpp/absl/flags/marshalling_test.cc
index 4a64ce1..b0e055f 100644
--- a/abseil-cpp/absl/flags/marshalling_test.cc
+++ b/abseil-cpp/absl/flags/marshalling_test.cc
@@ -137,11 +137,10 @@
   EXPECT_EQ(value, 16);
   EXPECT_TRUE(absl::ParseFlag("0X234", &value, &err));
   EXPECT_EQ(value, 564);
-  // TODO(rogeeff): fix below validations
-  EXPECT_FALSE(absl::ParseFlag("-0x7FFD", &value, &err));
-  EXPECT_NE(value, -3);
-  EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
-  EXPECT_NE(value, 49);
+  EXPECT_TRUE(absl::ParseFlag("-0x7FFD", &value, &err));
+  EXPECT_EQ(value, -32765);
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
 
   // Whitespace handling
   EXPECT_TRUE(absl::ParseFlag("10  ", &value, &err));
@@ -194,9 +193,8 @@
   EXPECT_EQ(value, 16);
   EXPECT_TRUE(absl::ParseFlag("0X234", &value, &err));
   EXPECT_EQ(value, 564);
-  // TODO(rogeeff): fix below validations
-  EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
-  EXPECT_NE(value, 49);
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
 
   // Whitespace handling
   EXPECT_TRUE(absl::ParseFlag("10  ", &value, &err));
@@ -254,11 +252,11 @@
   EXPECT_EQ(value, 16);
   EXPECT_TRUE(absl::ParseFlag("0X234", &value, &err));
   EXPECT_EQ(value, 564);
-  // TODO(rogeeff): fix below validations
-  EXPECT_FALSE(absl::ParseFlag("-0x7FFFFFFD", &value, &err));
-  EXPECT_NE(value, -3);
-  EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
-  EXPECT_NE(value, 49);
+
+  EXPECT_TRUE(absl::ParseFlag("-0x7FFFFFFD", &value, &err));
+  EXPECT_EQ(value, -2147483645);
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
 
   // Whitespace handling
   EXPECT_TRUE(absl::ParseFlag("10  ", &value, &err));
@@ -311,9 +309,8 @@
   EXPECT_EQ(value, 564);
   EXPECT_TRUE(absl::ParseFlag("0xFFFFFFFD", &value, &err));
   EXPECT_EQ(value, 4294967293);
-  // TODO(rogeeff): fix below validations
-  EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
-  EXPECT_NE(value, 49);
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
 
   // Whitespace handling
   EXPECT_TRUE(absl::ParseFlag("10  ", &value, &err));
@@ -371,11 +368,12 @@
   EXPECT_EQ(value, 16);
   EXPECT_TRUE(absl::ParseFlag("0XFFFAAABBBCCCDDD", &value, &err));
   EXPECT_EQ(value, 1152827684197027293);
-  // TODO(rogeeff): fix below validation
-  EXPECT_FALSE(absl::ParseFlag("-0x7FFFFFFFFFFFFFFE", &value, &err));
-  EXPECT_NE(value, -2);
-  EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
-  EXPECT_NE(value, 49);
+  EXPECT_TRUE(absl::ParseFlag("-0x7FFFFFFFFFFFFFFE", &value, &err));
+  EXPECT_EQ(value, -9223372036854775806);
+  EXPECT_TRUE(absl::ParseFlag("-0x02", &value, &err));
+  EXPECT_EQ(value, -2);
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
 
   // Whitespace handling
   EXPECT_TRUE(absl::ParseFlag("10  ", &value, &err));
@@ -428,9 +426,8 @@
   EXPECT_EQ(value, 16);
   EXPECT_TRUE(absl::ParseFlag("0XFFFF", &value, &err));
   EXPECT_EQ(value, 65535);
-  // TODO(rogeeff): fix below validation
-  EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
-  EXPECT_NE(value, 49);
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
 
   // Whitespace handling
   EXPECT_TRUE(absl::ParseFlag("10  ", &value, &err));
@@ -455,6 +452,125 @@
 
 // --------------------------------------------------------------------
 
+TEST(MarshallingTest, TestInt128Parsing) {
+  std::string err;
+  absl::int128 value;
+
+  // Decimal values.
+  EXPECT_TRUE(absl::ParseFlag("0", &value, &err));
+  EXPECT_EQ(value, 0);
+  EXPECT_TRUE(absl::ParseFlag("1", &value, &err));
+  EXPECT_EQ(value, 1);
+  EXPECT_TRUE(absl::ParseFlag("-1", &value, &err));
+  EXPECT_EQ(value, -1);
+  EXPECT_TRUE(absl::ParseFlag("123", &value, &err));
+  EXPECT_EQ(value, 123);
+  EXPECT_TRUE(absl::ParseFlag("-98765", &value, &err));
+  EXPECT_EQ(value, -98765);
+  EXPECT_TRUE(absl::ParseFlag("+3", &value, &err));
+  EXPECT_EQ(value, 3);
+
+  // Leading zero values.
+  EXPECT_TRUE(absl::ParseFlag("01", &value, &err));
+  EXPECT_EQ(value, 1);
+  EXPECT_TRUE(absl::ParseFlag("001", &value, &err));
+  EXPECT_EQ(value, 1);
+  EXPECT_TRUE(absl::ParseFlag("0000100", &value, &err));
+  EXPECT_EQ(value, 100);
+
+  // Hex values.
+  EXPECT_TRUE(absl::ParseFlag("0x10", &value, &err));
+  EXPECT_EQ(value, 16);
+  EXPECT_TRUE(absl::ParseFlag("0xFFFAAABBBCCCDDD", &value, &err));
+  EXPECT_EQ(value, 1152827684197027293);
+  EXPECT_TRUE(absl::ParseFlag("0xFFF0FFFFFFFFFFFFFFF", &value, &err));
+  EXPECT_EQ(value, absl::MakeInt128(0x000000000000fff, 0xFFFFFFFFFFFFFFF));
+
+  EXPECT_TRUE(absl::ParseFlag("-0x10000000000000000", &value, &err));
+  EXPECT_EQ(value, absl::MakeInt128(-1, 0));
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
+
+  // Whitespace handling
+  EXPECT_TRUE(absl::ParseFlag("16  ", &value, &err));
+  EXPECT_EQ(value, 16);
+  EXPECT_TRUE(absl::ParseFlag("  16", &value, &err));
+  EXPECT_EQ(value, 16);
+  EXPECT_TRUE(absl::ParseFlag("  0100  ", &value, &err));
+  EXPECT_EQ(value, 100);
+  EXPECT_TRUE(absl::ParseFlag(" 0x7B    ", &value, &err));
+  EXPECT_EQ(value, 123);
+
+  // Invalid values.
+  EXPECT_FALSE(absl::ParseFlag("", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag(" ", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("  ", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("--1", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("\n", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("\t", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("2U", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("FFF", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestUint128Parsing) {
+  std::string err;
+  absl::uint128 value;
+
+  // Decimal values.
+  EXPECT_TRUE(absl::ParseFlag("0", &value, &err));
+  EXPECT_EQ(value, 0);
+  EXPECT_TRUE(absl::ParseFlag("1", &value, &err));
+  EXPECT_EQ(value, 1);
+  EXPECT_TRUE(absl::ParseFlag("123", &value, &err));
+  EXPECT_EQ(value, 123);
+  EXPECT_TRUE(absl::ParseFlag("+3", &value, &err));
+  EXPECT_EQ(value, 3);
+
+  // Leading zero values.
+  EXPECT_TRUE(absl::ParseFlag("01", &value, &err));
+  EXPECT_EQ(value, 1);
+  EXPECT_TRUE(absl::ParseFlag("001", &value, &err));
+  EXPECT_EQ(value, 1);
+  EXPECT_TRUE(absl::ParseFlag("0000100", &value, &err));
+  EXPECT_EQ(value, 100);
+
+  // Hex values.
+  EXPECT_TRUE(absl::ParseFlag("0x10", &value, &err));
+  EXPECT_EQ(value, 16);
+  EXPECT_TRUE(absl::ParseFlag("0xFFFAAABBBCCCDDD", &value, &err));
+  EXPECT_EQ(value, 1152827684197027293);
+  EXPECT_TRUE(absl::ParseFlag("0xFFF0FFFFFFFFFFFFFFF", &value, &err));
+  EXPECT_EQ(value, absl::MakeInt128(0x000000000000fff, 0xFFFFFFFFFFFFFFF));
+  EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+  EXPECT_EQ(value, 49);
+
+  // Whitespace handling
+  EXPECT_TRUE(absl::ParseFlag("16  ", &value, &err));
+  EXPECT_EQ(value, 16);
+  EXPECT_TRUE(absl::ParseFlag("  16", &value, &err));
+  EXPECT_EQ(value, 16);
+  EXPECT_TRUE(absl::ParseFlag("  0100  ", &value, &err));
+  EXPECT_EQ(value, 100);
+  EXPECT_TRUE(absl::ParseFlag(" 0x7B    ", &value, &err));
+  EXPECT_EQ(value, 123);
+
+  // Invalid values.
+  EXPECT_FALSE(absl::ParseFlag("", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag(" ", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("  ", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("-1", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("--1", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("\n", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("\t", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("2U", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("FFF", &value, &err));
+  EXPECT_FALSE(absl::ParseFlag("-0x10000000000000000", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
 TEST(MarshallingTest, TestFloatParsing) {
   std::string err;
   float value;
@@ -659,6 +775,88 @@
 
 // --------------------------------------------------------------------
 
+TEST(MarshallingTest, TestOptionalBoolParsing) {
+  std::string err;
+  absl::optional<bool> value;
+
+  EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+  EXPECT_FALSE(value.has_value());
+
+  EXPECT_TRUE(absl::ParseFlag("true", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_TRUE(*value);
+
+  EXPECT_TRUE(absl::ParseFlag("false", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_FALSE(*value);
+
+  EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalIntParsing) {
+  std::string err;
+  absl::optional<int> value;
+
+  EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+  EXPECT_FALSE(value.has_value());
+
+  EXPECT_TRUE(absl::ParseFlag("10", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, 10);
+
+  EXPECT_TRUE(absl::ParseFlag("0x1F", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, 31);
+
+  EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalDoubleParsing) {
+  std::string err;
+  absl::optional<double> value;
+
+  EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+  EXPECT_FALSE(value.has_value());
+
+  EXPECT_TRUE(absl::ParseFlag("1.11", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, 1.11);
+
+  EXPECT_TRUE(absl::ParseFlag("-0.12", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, -0.12);
+
+  EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalStringParsing) {
+  std::string err;
+  absl::optional<std::string> value;
+
+  EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+  EXPECT_FALSE(value.has_value());
+
+  EXPECT_TRUE(absl::ParseFlag(" ", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, " ");
+
+  EXPECT_TRUE(absl::ParseFlag("aqswde", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, "aqswde");
+
+  EXPECT_TRUE(absl::ParseFlag("nullopt", &value, &err));
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(*value, "nullopt");
+}
+
+// --------------------------------------------------------------------
+
 TEST(MarshallingTest, TestBoolUnparsing) {
   EXPECT_EQ(absl::UnparseFlag(true), "true");
   EXPECT_EQ(absl::UnparseFlag(false), "false");
@@ -762,6 +960,40 @@
 
 // --------------------------------------------------------------------
 
+TEST(MarshallingTest, TestInt128Unparsing) {
+  absl::int128 value;
+
+  value = 1;
+  EXPECT_EQ(absl::UnparseFlag(value), "1");
+  value = 0;
+  EXPECT_EQ(absl::UnparseFlag(value), "0");
+  value = -1;
+  EXPECT_EQ(absl::UnparseFlag(value), "-1");
+  value = 123456789L;
+  EXPECT_EQ(absl::UnparseFlag(value), "123456789");
+  value = -987654321L;
+  EXPECT_EQ(absl::UnparseFlag(value), "-987654321");
+  value = 0x7FFFFFFFFFFFFFFF;
+  EXPECT_EQ(absl::UnparseFlag(value), "9223372036854775807");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestUint128Unparsing) {
+  absl::uint128 value;
+
+  value = 1;
+  EXPECT_EQ(absl::UnparseFlag(value), "1");
+  value = 0;
+  EXPECT_EQ(absl::UnparseFlag(value), "0");
+  value = 123456789L;
+  EXPECT_EQ(absl::UnparseFlag(value), "123456789");
+  value = absl::MakeUint128(0, 0xFFFFFFFFFFFFFFFF);
+  EXPECT_EQ(absl::UnparseFlag(value), "18446744073709551615");
+}
+
+// --------------------------------------------------------------------
+
 TEST(MarshallingTest, TestFloatUnparsing) {
   float value;
 
@@ -808,6 +1040,90 @@
 
 // --------------------------------------------------------------------
 
+TEST(MarshallingTest, TestOptionalBoolUnparsing) {
+  absl::optional<bool> value;
+
+  EXPECT_EQ(absl::UnparseFlag(value), "");
+  value = true;
+  EXPECT_EQ(absl::UnparseFlag(value), "true");
+  value = false;
+  EXPECT_EQ(absl::UnparseFlag(value), "false");
+  value = absl::nullopt;
+  EXPECT_EQ(absl::UnparseFlag(value), "");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalIntUnparsing) {
+  absl::optional<int> value;
+
+  EXPECT_EQ(absl::UnparseFlag(value), "");
+  value = 0;
+  EXPECT_EQ(absl::UnparseFlag(value), "0");
+  value = -12;
+  EXPECT_EQ(absl::UnparseFlag(value), "-12");
+  value = absl::nullopt;
+  EXPECT_EQ(absl::UnparseFlag(value), "");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalDoubleUnparsing) {
+  absl::optional<double> value;
+
+  EXPECT_EQ(absl::UnparseFlag(value), "");
+  value = 1.;
+  EXPECT_EQ(absl::UnparseFlag(value), "1");
+  value = -1.23;
+  EXPECT_EQ(absl::UnparseFlag(value), "-1.23");
+  value = absl::nullopt;
+  EXPECT_EQ(absl::UnparseFlag(value), "");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalStringUnparsing) {
+  absl::optional<std::string> strvalue;
+  EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+
+  strvalue = "asdfg";
+  EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg");
+
+  strvalue = " ";
+  EXPECT_EQ(absl::UnparseFlag(strvalue), " ");
+
+  strvalue = "";  // It is UB to set an optional string flag to ""
+  EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+}
+
+// --------------------------------------------------------------------
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+
+TEST(MarshallingTest, TestStdOptionalUnparsing) {
+  std::optional<std::string> strvalue;
+  EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+
+  strvalue = "asdfg";
+  EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg");
+
+  strvalue = " ";
+  EXPECT_EQ(absl::UnparseFlag(strvalue), " ");
+
+  strvalue = "";  // It is UB to set an optional string flag to ""
+  EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+
+  std::optional<int> intvalue;
+  EXPECT_EQ(absl::UnparseFlag(intvalue), "");
+
+  intvalue = 10;
+  EXPECT_EQ(absl::UnparseFlag(intvalue), "10");
+}
+
+// --------------------------------------------------------------------
+
+#endif
+
 template <typename T>
 void TestRoundtrip(T v) {
   T new_v;
diff --git a/abseil-cpp/absl/flags/parse.cc b/abseil-cpp/absl/flags/parse.cc
index 4f4bb3d..4cdd9d0 100644
--- a/abseil-cpp/absl/flags/parse.cc
+++ b/abseil-cpp/absl/flags/parse.cc
@@ -18,9 +18,11 @@
 #include <stdlib.h>
 
 #include <algorithm>
+#include <cstdint>
+#include <cstdlib>
 #include <fstream>
 #include <iostream>
-#include <iterator>
+#include <ostream>
 #include <string>
 #include <tuple>
 #include <utility>
@@ -30,6 +32,7 @@
 #include <windows.h>
 #endif
 
+#include "absl/algorithm/container.h"
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/const_init.h"
@@ -47,7 +50,9 @@
 #include "absl/flags/usage.h"
 #include "absl/flags/usage_config.h"
 #include "absl/strings/ascii.h"
+#include "absl/strings/internal/damerau_levenshtein_distance.h"
 #include "absl/strings/str_cat.h"
+#include "absl/strings/str_join.h"
 #include "absl/strings/string_view.h"
 #include "absl/strings/strip.h"
 #include "absl/synchronization/mutex.h"
@@ -72,6 +77,11 @@
 ABSL_CONST_INIT std::vector<const CommandLineFlag*>* specified_flags
     ABSL_GUARDED_BY(specified_flags_guard) = nullptr;
 
+// Suggesting at most kMaxHints flags in case of misspellings.
+ABSL_CONST_INIT const size_t kMaxHints = 100;
+// Suggesting only flags which have a smaller distance than kMaxDistance.
+ABSL_CONST_INIT const size_t kMaxDistance = 3;
+
 struct SpecifiedFlagsCompare {
   bool operator()(const CommandLineFlag* a, const CommandLineFlag* b) const {
     return a->Name() < b->Name();
@@ -89,6 +99,8 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
+// These flags influence how command line flags are parsed and are only intended
+// to be set on the command line.  Avoid reading or setting them from C++ code.
 ABSL_FLAG(std::vector<std::string>, flagfile, {},
           "comma-separated list of files to load flags from")
     .OnUpdate([]() {
@@ -138,6 +150,8 @@
       absl::flags_internal::tryfromenv_needs_processing = true;
     });
 
+// Rather than reading or setting --undefok from C++ code, please consider using
+// ABSL_RETIRED_FLAG instead.
 ABSL_FLAG(std::vector<std::string>, undefok, {},
           "comma-separated list of flag names that it is okay to specify "
           "on the command line even if the program does not define a flag "
@@ -159,14 +173,14 @@
   // Returns success status: true if parsing successful, false otherwise.
   bool ReadFromFlagfile(const std::string& flag_file_name);
 
-  int Size() const { return args_.size() - next_arg_; }
-  int FrontIndex() const { return next_arg_; }
+  size_t Size() const { return args_.size() - next_arg_; }
+  size_t FrontIndex() const { return next_arg_; }
   absl::string_view Front() const { return args_[next_arg_]; }
   void PopFront() { next_arg_++; }
 
  private:
   std::vector<std::string> args_;
-  int next_arg_;
+  size_t next_arg_;
 };
 
 bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) {
@@ -181,7 +195,7 @@
 
   // This argument represents fake argv[0], which should be present in all arg
   // lists.
-  args_.push_back("");
+  args_.emplace_back("");
 
   std::string line;
   bool success = true;
@@ -203,7 +217,7 @@
         break;
       }
 
-      args_.push_back(std::string(stripped));
+      args_.emplace_back(stripped);
       continue;
     }
 
@@ -269,7 +283,7 @@
     return std::make_tuple("", "", false);
   }
 
-  auto equal_sign_pos = arg.find("=");
+  auto equal_sign_pos = arg.find('=');
 
   absl::string_view flag_name = arg.substr(0, equal_sign_pos);
 
@@ -358,7 +372,7 @@
 
   // This argument represents fake argv[0], which should be present in all arg
   // lists.
-  args.push_back("");
+  args.emplace_back("");
 
   for (const auto& flag_name : flag_names) {
     // Avoid infinite recursion.
@@ -407,7 +421,7 @@
   // programmatically before invoking ParseCommandLine. Note that we do not
   // actually process arguments specified in the flagfile, but instead
   // create a secondary arguments list to be processed along with the rest
-  // of the comamnd line arguments. Since we always the process most recently
+  // of the command line arguments. Since we always the process most recently
   // created list of arguments first, this will result in flagfile argument
   // being processed before any other argument in the command line. If
   // FLAGS_flagfile contains more than one file name we create multiple new
@@ -590,6 +604,34 @@
   return false;
 }
 
+// --------------------------------------------------------------------
+
+void ReportUnrecognizedFlags(
+    const std::vector<UnrecognizedFlag>& unrecognized_flags,
+    bool report_as_fatal_error) {
+  for (const auto& unrecognized : unrecognized_flags) {
+    // Verify if flag_name has the "no" already removed
+    std::vector<std::string> misspelling_hints;
+    if (unrecognized.source == UnrecognizedFlag::kFromArgv) {
+      misspelling_hints =
+          flags_internal::GetMisspellingHints(unrecognized.flag_name);
+    }
+
+    if (misspelling_hints.empty()) {
+      flags_internal::ReportUsageError(
+          absl::StrCat("Unknown command line flag '", unrecognized.flag_name,
+                       "'"),
+          report_as_fatal_error);
+    } else {
+      flags_internal::ReportUsageError(
+          absl::StrCat("Unknown command line flag '", unrecognized.flag_name,
+                       "'. Did you mean: ",
+                       absl::StrJoin(misspelling_hints, ", "), " ?"),
+          report_as_fatal_error);
+    }
+  }
+}
+
 }  // namespace
 
 // --------------------------------------------------------------------
@@ -605,55 +647,144 @@
 
 // --------------------------------------------------------------------
 
+struct BestHints {
+  explicit BestHints(uint8_t _max) : best_distance(_max + 1) {}
+  bool AddHint(absl::string_view hint, uint8_t distance) {
+    if (hints.size() >= kMaxHints) return false;
+    if (distance == best_distance) {
+      hints.emplace_back(hint);
+    }
+    if (distance < best_distance) {
+      best_distance = distance;
+      hints = std::vector<std::string>{std::string(hint)};
+    }
+    return true;
+  }
+
+  uint8_t best_distance;
+  std::vector<std::string> hints;
+};
+
+// Return the list of flags with the smallest Damerau-Levenshtein distance to
+// the given flag.
+std::vector<std::string> GetMisspellingHints(const absl::string_view flag) {
+  const size_t maxCutoff = std::min(flag.size() / 2 + 1, kMaxDistance);
+  auto undefok = absl::GetFlag(FLAGS_undefok);
+  BestHints best_hints(static_cast<uint8_t>(maxCutoff));
+  flags_internal::ForEachFlag([&](const CommandLineFlag& f) {
+    if (best_hints.hints.size() >= kMaxHints) return;
+    uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance(
+        flag, f.Name(), best_hints.best_distance);
+    best_hints.AddHint(f.Name(), distance);
+    // For boolean flags, also calculate distance to the negated form.
+    if (f.IsOfType<bool>()) {
+      const std::string negated_flag = absl::StrCat("no", f.Name());
+      distance = strings_internal::CappedDamerauLevenshteinDistance(
+          flag, negated_flag, best_hints.best_distance);
+      best_hints.AddHint(negated_flag, distance);
+    }
+  });
+  // Finally calculate distance to flags in "undefok".
+  absl::c_for_each(undefok, [&](const absl::string_view f) {
+    if (best_hints.hints.size() >= kMaxHints) return;
+    uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance(
+        flag, f, best_hints.best_distance);
+    best_hints.AddHint(absl::StrCat(f, " (undefok)"), distance);
+  });
+  return best_hints.hints;
+}
+
+// --------------------------------------------------------------------
+
 std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
-                                        ArgvListAction arg_list_act,
-                                        UsageFlagsAction usage_flag_act,
-                                        OnUndefinedFlag on_undef_flag) {
+                                        UsageFlagsAction usage_flag_action,
+                                        OnUndefinedFlag undef_flag_action,
+                                        std::ostream& error_help_output) {
+  std::vector<char*> positional_args;
+  std::vector<UnrecognizedFlag> unrecognized_flags;
+
+  auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
+      argc, argv, positional_args, unrecognized_flags, usage_flag_action);
+
+  if (undef_flag_action != OnUndefinedFlag::kIgnoreUndefined) {
+    flags_internal::ReportUnrecognizedFlags(
+        unrecognized_flags,
+        (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined));
+
+    if (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined) {
+      if (!unrecognized_flags.empty()) {
+        flags_internal::HandleUsageFlags(error_help_output,
+        ProgramUsageMessage()); std::exit(1);
+      }
+    }
+  }
+
+  flags_internal::MaybeExit(help_mode);
+
+  return positional_args;
+}
+
+// --------------------------------------------------------------------
+
+// This function handles all Abseil Flags and built-in usage flags and, if any
+// help mode was handled, it returns that help mode. The caller of this function
+// can decide to exit based on the returned help mode.
+// The caller may decide to handle unrecognized positional arguments and
+// unrecognized flags first before exiting.
+//
+// Returns:
+// * HelpMode::kFull if parsing errors were detected in recognized arguments
+// * The HelpMode that was handled in case when `usage_flag_action` is
+//   UsageFlagsAction::kHandleUsage and a usage flag was specified on the
+//   commandline
+// * Otherwise it returns HelpMode::kNone
+HelpMode ParseAbseilFlagsOnlyImpl(
+    int argc, char* argv[], std::vector<char*>& positional_args,
+    std::vector<UnrecognizedFlag>& unrecognized_flags,
+    UsageFlagsAction usage_flag_action) {
   ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]");
 
-  // This routine does not return anything since we abort on failure.
-  CheckDefaultValuesParsingRoundtrip();
+  using flags_internal::ArgsList;
+  using flags_internal::specified_flags;
 
   std::vector<std::string> flagfile_value;
-
   std::vector<ArgsList> input_args;
+
+  // Once parsing has started we will not allow more flag registrations.
+  flags_internal::FinalizeRegistry();
+
+  // This routine does not return anything since we abort on failure.
+  flags_internal::CheckDefaultValuesParsingRoundtrip();
+
   input_args.push_back(ArgsList(argc, argv));
 
-  std::vector<char*> output_args;
-  std::vector<char*> positional_args;
-  output_args.reserve(argc);
-
-  // This is the list of undefined flags. The element of the list is the pair
-  // consisting of boolean indicating if flag came from command line (vs from
-  // some flag file we've read) and flag name.
-  // TODO(rogeeff): Eliminate the first element in the pair after cleanup.
-  std::vector<std::pair<bool, std::string>> undefined_flag_names;
-
   // Set program invocation name if it is not set before.
-  if (ProgramInvocationName() == "UNKNOWN") {
+  if (flags_internal::ProgramInvocationName() == "UNKNOWN") {
     flags_internal::SetProgramInvocationName(argv[0]);
   }
-  output_args.push_back(argv[0]);
+  positional_args.push_back(argv[0]);
 
-  absl::MutexLock l(&specified_flags_guard);
+  absl::MutexLock l(&flags_internal::specified_flags_guard);
   if (specified_flags == nullptr) {
     specified_flags = new std::vector<const CommandLineFlag*>;
   } else {
     specified_flags->clear();
   }
 
-  // Iterate through the list of the input arguments. First level are arguments
-  // originated from argc/argv. Following levels are arguments originated from
-  // recursive parsing of flagfile(s).
+  // Iterate through the list of the input arguments. First level are
+  // arguments originated from argc/argv. Following levels are arguments
+  // originated from recursive parsing of flagfile(s).
   bool success = true;
   while (!input_args.empty()) {
-    // 10. First we process the built-in generator flags.
-    success &= HandleGeneratorFlags(input_args, flagfile_value);
+    // First we process the built-in generator flags.
+    success &= flags_internal::HandleGeneratorFlags(input_args, flagfile_value);
 
-    // 30. Select top-most (most recent) arguments list. If it is empty drop it
+    // Select top-most (most recent) arguments list. If it is empty drop it
     // and re-try.
     ArgsList& curr_list = input_args.back();
 
+    // Every ArgsList starts with real or fake program name, so we can always
+    // start by skipping it.
     curr_list.PopFront();
 
     if (curr_list.Size() == 0) {
@@ -661,13 +792,13 @@
       continue;
     }
 
-    // 40. Pick up the front remaining argument in the current list. If current
-    // stack of argument lists contains only one element - we are processing an
-    // argument from the original argv.
+    // Handle the next argument in the current list. If the stack of argument
+    // lists contains only one element - we are processing an argument from
+    // the original argv.
     absl::string_view arg(curr_list.Front());
     bool arg_from_argv = input_args.size() == 1;
 
-    // 50. If argument does not start with - or is just "-" - this is
+    // If argument does not start with '-' or is just "-" - this is
     // positional argument.
     if (!absl::ConsumePrefix(&arg, "-") || arg.empty()) {
       ABSL_INTERNAL_CHECK(arg_from_argv,
@@ -677,12 +808,8 @@
       continue;
     }
 
-    if (arg_from_argv && (arg_list_act == ArgvListAction::kKeepParsedArgs)) {
-      output_args.push_back(argv[curr_list.FrontIndex()]);
-    }
-
-    // 60. Split the current argument on '=' to figure out the argument
-    // name and value. If flag name is empty it means we've got "--". value
+    // Split the current argument on '=' to deduce the argument flag name and
+    // value. If flag name is empty it means we've got an "--" argument. Value
     // can be empty either if there were no '=' in argument string at all or
     // an argument looked like "--foo=". In a latter case is_empty_value is
     // true.
@@ -690,10 +817,11 @@
     absl::string_view value;
     bool is_empty_value = false;
 
-    std::tie(flag_name, value, is_empty_value) = SplitNameAndValue(arg);
+    std::tie(flag_name, value, is_empty_value) =
+        flags_internal::SplitNameAndValue(arg);
 
-    // 70. "--" alone means what it does for GNU: stop flags parsing. We do
-    // not support positional arguments in flagfiles, so we just drop them.
+    // Standalone "--" argument indicates that the rest of the arguments are
+    // positional. We do not support positional arguments in flagfiles.
     if (flag_name.empty()) {
       ABSL_INTERNAL_CHECK(arg_from_argv,
                           "Flagfile cannot contain positional argument");
@@ -702,38 +830,36 @@
       break;
     }
 
-    // 80. Locate the flag based on flag name. Handle both --foo and --nofoo
+    // Locate the flag based on flag name. Handle both --foo and --nofoo.
     CommandLineFlag* flag = nullptr;
     bool is_negative = false;
-    std::tie(flag, is_negative) = LocateFlag(flag_name);
+    std::tie(flag, is_negative) = flags_internal::LocateFlag(flag_name);
 
     if (flag == nullptr) {
-      if (on_undef_flag != OnUndefinedFlag::kIgnoreUndefined) {
-        undefined_flag_names.emplace_back(arg_from_argv,
-                                          std::string(flag_name));
+      // Usage flags are not modeled as Abseil flags. Locate them separately.
+      if (flags_internal::DeduceUsageFlags(flag_name, value)) {
+        continue;
       }
+      unrecognized_flags.emplace_back(arg_from_argv
+                                          ? UnrecognizedFlag::kFromArgv
+                                          : UnrecognizedFlag::kFromFlagfile,
+                                      flag_name);
       continue;
     }
 
-    // 90. Deduce flag's value (from this or next argument)
-    auto curr_index = curr_list.FrontIndex();
+    // Deduce flag's value (from this or next argument).
     bool value_success = true;
-    std::tie(value_success, value) =
-        DeduceFlagValue(*flag, value, is_negative, is_empty_value, &curr_list);
+    std::tie(value_success, value) = flags_internal::DeduceFlagValue(
+        *flag, value, is_negative, is_empty_value, &curr_list);
     success &= value_success;
 
-    // If above call consumed an argument, it was a standalone value
-    if (arg_from_argv && (arg_list_act == ArgvListAction::kKeepParsedArgs) &&
-        (curr_index != curr_list.FrontIndex())) {
-      output_args.push_back(argv[curr_list.FrontIndex()]);
-    }
-
-    // 100. Set the located flag to a new new value, unless it is retired.
-    // Setting retired flag fails, but we ignoring it here while also reporting
-    // access to retired flag.
+    // Set the located flag to a new value, unless it is retired. Setting
+    // retired flag fails, but we ignoring it here while also reporting access
+    // to retired flag.
     std::string error;
     if (!flags_internal::PrivateHandleAccessor::ParseFrom(
-            *flag, value, SET_FLAGS_VALUE, kCommandLine, error)) {
+            *flag, value, flags_internal::SET_FLAGS_VALUE,
+            flags_internal::kCommandLine, error)) {
       if (flag->IsRetired()) continue;
 
       flags_internal::ReportUsageError(error, true);
@@ -743,69 +869,73 @@
     }
   }
 
-  for (const auto& flag_name : undefined_flag_names) {
-    if (CanIgnoreUndefinedFlag(flag_name.second)) continue;
-
-    flags_internal::ReportUsageError(
-        absl::StrCat("Unknown command line flag '", flag_name.second, "'"),
-        true);
-
-    success = false;
-  }
-
-#if ABSL_FLAGS_STRIP_NAMES
-  if (!success) {
-    flags_internal::ReportUsageError(
-        "NOTE: command line flags are disabled in this build", true);
-  }
-#endif
-
-  if (!success) {
-    flags_internal::HandleUsageFlags(std::cout,
-                                     ProgramUsageMessage());
-    std::exit(1);
-  }
-
-  if (usage_flag_act == UsageFlagsAction::kHandleUsage) {
-    int exit_code = flags_internal::HandleUsageFlags(
-        std::cout, ProgramUsageMessage());
-
-    if (exit_code != -1) {
-      std::exit(exit_code);
-    }
-  }
-
-  ResetGeneratorFlags(flagfile_value);
-
-  // Reinstate positional args which were intermixed with flags in the arguments
-  // list.
-  for (auto arg : positional_args) {
-    output_args.push_back(arg);
-  }
+  flags_internal::ResetGeneratorFlags(flagfile_value);
 
   // All the remaining arguments are positional.
   if (!input_args.empty()) {
-    for (int arg_index = input_args.back().FrontIndex(); arg_index < argc;
-         ++arg_index) {
-      output_args.push_back(argv[arg_index]);
+    for (size_t arg_index = input_args.back().FrontIndex();
+         arg_index < static_cast<size_t>(argc); ++arg_index) {
+      positional_args.push_back(argv[arg_index]);
     }
   }
 
   // Trim and sort the vector.
   specified_flags->shrink_to_fit();
   std::sort(specified_flags->begin(), specified_flags->end(),
-            SpecifiedFlagsCompare{});
-  return output_args;
+            flags_internal::SpecifiedFlagsCompare{});
+
+  // Filter out the unrecognized flags that are OK to ignore (e.g. those
+  // listed in --undefok).
+  std::vector<UnrecognizedFlag> filtered;
+  filtered.reserve(unrecognized_flags.size());
+  for (const auto& unrecognized : unrecognized_flags) {
+    if (flags_internal::CanIgnoreUndefinedFlag(unrecognized.flag_name))
+      continue;
+    filtered.push_back(unrecognized);
+  }
+
+  std::swap(unrecognized_flags, filtered);
+
+  if (!success) {
+#if ABSL_FLAGS_STRIP_NAMES
+    flags_internal::ReportUsageError(
+        "NOTE: command line flags are disabled in this build", true);
+#else
+    flags_internal::HandleUsageFlags(std::cerr, ProgramUsageMessage());
+#endif
+    return HelpMode::kFull;  // We just need to make sure we exit with
+                             // code 1.
+  }
+
+  return usage_flag_action == UsageFlagsAction::kHandleUsage
+             ? flags_internal::HandleUsageFlags(std::cout,
+                                                ProgramUsageMessage())
+             : HelpMode::kNone;
 }
 
 }  // namespace flags_internal
 
+void ParseAbseilFlagsOnly(int argc, char* argv[],
+                          std::vector<char*>& positional_args,
+                          std::vector<UnrecognizedFlag>& unrecognized_flags) {
+  auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
+      argc, argv, positional_args, unrecognized_flags,
+      flags_internal::UsageFlagsAction::kHandleUsage);
+
+  flags_internal::MaybeExit(help_mode);
+}
+
+// --------------------------------------------------------------------
+
+void ReportUnrecognizedFlags(
+    const std::vector<UnrecognizedFlag>& unrecognized_flags) {
+  flags_internal::ReportUnrecognizedFlags(unrecognized_flags, true);
+}
+
 // --------------------------------------------------------------------
 
 std::vector<char*> ParseCommandLine(int argc, char* argv[]) {
   return flags_internal::ParseCommandLineImpl(
-      argc, argv, flags_internal::ArgvListAction::kRemoveParsedArgs,
-      flags_internal::UsageFlagsAction::kHandleUsage,
+      argc, argv, flags_internal::UsageFlagsAction::kHandleUsage,
       flags_internal::OnUndefinedFlag::kAbortIfUndefined);
 }
 
diff --git a/abseil-cpp/absl/flags/parse.h b/abseil-cpp/absl/flags/parse.h
index 929de2c..f2a5cb1 100644
--- a/abseil-cpp/absl/flags/parse.h
+++ b/abseil-cpp/absl/flags/parse.h
@@ -23,6 +23,7 @@
 #ifndef ABSL_FLAGS_PARSE_H_
 #define ABSL_FLAGS_PARSE_H_
 
+#include <string>
 #include <vector>
 
 #include "absl/base/config.h"
@@ -31,27 +32,96 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
+// This type represents information about an unrecognized flag in the command
+// line.
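+//
+// For illustration (hypothetical flag name): an unknown flag passed directly
+// on the command line as `--does_not_exist=1` surfaces as
+// `UnrecognizedFlag(UnrecognizedFlag::kFromArgv, "does_not_exist")`; the same
+// flag read from a flagfile uses `kFromFlagfile` instead.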
+struct UnrecognizedFlag {
+  enum Source { kFromArgv, kFromFlagfile };
+
+  explicit UnrecognizedFlag(Source s, absl::string_view f)
+      : source(s), flag_name(f) {}
+  // This field indicates where we found this flag: on the original command line
+  // or read in some flag file.
+  Source source;
+  // Name of the flag we did not recognize in --flag_name=value or --flag_name.
+  std::string flag_name;
+};
+
+inline bool operator==(const UnrecognizedFlag& lhs,
+                       const UnrecognizedFlag& rhs) {
+  return lhs.source == rhs.source && lhs.flag_name == rhs.flag_name;
+}
+
+namespace flags_internal {
+
+HelpMode ParseAbseilFlagsOnlyImpl(
+    int argc, char* argv[], std::vector<char*>& positional_args,
+    std::vector<UnrecognizedFlag>& unrecognized_flags,
+    UsageFlagsAction usage_flag_action);
+
+}  // namespace flags_internal
+
+// ParseAbseilFlagsOnly()
+//
+// Parses a list of command-line arguments, passed in the `argc` and `argv[]`
+// parameters, into a set of Abseil Flag values, returning any unparsed
+// arguments in `positional_args` and `unrecognized_flags` output parameters.
+//
+// This function classifies all the arguments (including content of the
+// flagfiles, if any) into one of the following groups:
+//
+//   * arguments specified as "--flag=value" or "--flag value" that match
+//     registered or built-in Abseil Flags. These are "Abseil Flag arguments."
+//   * arguments specified as "--flag" that are unrecognized as Abseil Flags
+//   * arguments that are not specified as "--flag" are positional arguments
+//   * arguments that follow the flag-terminating delimiter (`--`) are also
+//     treated as positional arguments regardless of their syntax.
+//
+// All of the deduced Abseil Flag arguments are then parsed into their
+// corresponding flag values. If any syntax errors are found in these arguments,
+// the binary exits with code 1.
+//
+// This function also handles Abseil Flags built-in usage flags (e.g. --help)
+// if any were present on the command line.
+//
+// All the remaining positional arguments, including the original program name
+// (argv[0]), are returned in the `positional_args` output parameter.
+//
+// All unrecognized flags that are not otherwise ignored are returned in the
+// `unrecognized_flags` output parameter. Note that the special `undefok`
+// flag allows you to specify flags which can be safely ignored; `undefok`
+// specifies these flags as a comma-separated list. Any unrecognized flags
+// that appear within `undefok` will therefore be ignored and not included in
+// the `unrecognized_flags` output parameter.
+//
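+// A minimal usage sketch (illustrative only; see `ReportUnrecognizedFlags`
+// below for the reporting helper):
+//
+//   int main(int argc, char* argv[]) {
+//     std::vector<char*> positional_args;
+//     std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+//     absl::ParseAbseilFlagsOnly(argc, argv, positional_args,
+//                                unrecognized_flags);
+//     if (!unrecognized_flags.empty()) {
+//       absl::ReportUnrecognizedFlags(unrecognized_flags);
+//       return 1;
+//     }
+//     // positional_args[0] is the program name; the rest are positional.
+//     return 0;
+//   }
+//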
+void ParseAbseilFlagsOnly(int argc, char* argv[],
+                          std::vector<char*>& positional_args,
+                          std::vector<UnrecognizedFlag>& unrecognized_flags);
+
+// ReportUnrecognizedFlags()
+//
+// Reports an error to `stderr` for all non-ignored unrecognized flags in
+// the provided `unrecognized_flags` list.
+void ReportUnrecognizedFlags(
+    const std::vector<UnrecognizedFlag>& unrecognized_flags);
+
 // ParseCommandLine()
 //
-// Parses the set of command-line arguments passed in the `argc` (argument
-// count) and `argv[]` (argument vector) parameters from `main()`, assigning
-// values to any defined Abseil flags. (Any arguments passed after the
-// flag-terminating delimiter (`--`) are treated as positional arguments and
-// ignored.)
+// First parses Abseil Flags only from the command line according to the
+// description in `ParseAbseilFlagsOnly`. In addition this function handles
+// unrecognized and usage flags.
 //
-// Any command-line flags (and arguments to those flags) are parsed into Abseil
-// Flag values, if those flags are defined. Any undefined flags will either
-// return an error, or be ignored if that flag is designated using `undefok` to
-// indicate "undefined is OK."
+// If any unrecognized flags are located they are reported using
+// `ReportUnrecognizedFlags`.
 //
-// Any command-line positional arguments not part of any command-line flag (or
-// arguments to a flag) are returned in a vector, with the program invocation
-// name at position 0 of that vector. (Note that this includes positional
-// arguments after the flag-terminating delimiter `--`.)
+// If any errors are detected during command line parsing, this routine
+// reports a usage error message and aborts the program.
 //
-// After all flags and flag arguments are parsed, this function looks for any
-// built-in usage flags (e.g. `--help`), and if any were specified, it reports
-// help messages and then exits the program.
+// If any built-in usage flags were specified on the command line (e.g.
+// `--help`), this function reports help messages and then gracefully exits the
+// program.
+//
+// This function returns all the remaining positional arguments collected by
+// `ParseAbseilFlagsOnly`.
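+//
+// A typical call from `main` (illustrative sketch):
+//
+//   int main(int argc, char* argv[]) {
+//     std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+//     // args[0] is the program name; the remaining entries are the
+//     // positional arguments.
+//     ...
+//   }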
 std::vector<char*> ParseCommandLine(int argc, char* argv[]);
 
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/flags/parse_test.cc b/abseil-cpp/absl/flags/parse_test.cc
index d35a6e4..97b7898 100644
--- a/abseil-cpp/absl/flags/parse_test.cc
+++ b/abseil-cpp/absl/flags/parse_test.cc
@@ -18,17 +18,18 @@
 #include <stdlib.h>
 
 #include <fstream>
+#include <iostream>
 #include <string>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/scoped_set_env.h"
-#include "absl/flags/declare.h"
 #include "absl/flags/flag.h"
 #include "absl/flags/internal/parse.h"
+#include "absl/flags/internal/usage.h"
 #include "absl/flags/reflection.h"
+#include "absl/log/log.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/string_view.h"
 #include "absl/strings/substitute.h"
@@ -38,6 +39,36 @@
 #include <windows.h>
 #endif
 
+// Define 125 similar flags to test kMaxHints for flag suggestions.
+#define FLAG_MULT(x) F3(x)
+#define TEST_FLAG_HEADER FLAG_HEADER_
+
+#define F(name) ABSL_FLAG(int, name, 0, "");
+
+#define F1(name) \
+  F(name##1);    \
+  F(name##2);    \
+  F(name##3);    \
+  F(name##4);    \
+  F(name##5);
+/**/
+#define F2(name) \
+  F1(name##1);   \
+  F1(name##2);   \
+  F1(name##3);   \
+  F1(name##4);   \
+  F1(name##5);
+/**/
+#define F3(name) \
+  F2(name##1);   \
+  F2(name##2);   \
+  F2(name##3);   \
+  F2(name##4);   \
+  F2(name##5);
+/**/
+
+FLAG_MULT(TEST_FLAG_HEADER)
+
 namespace {
 
 using absl::base_internal::ScopedSetEnv;
@@ -45,6 +76,7 @@
 struct UDT {
   UDT() = default;
   UDT(const UDT&) = default;
+  UDT& operator=(const UDT&) = default;
   UDT(int v) : value(v) {}  // NOLINT
 
   int value;
@@ -118,8 +150,7 @@
     }
 
     if (res->empty()) {
-      ABSL_INTERNAL_LOG(FATAL,
-                        "Failed to make temporary directory for data files");
+      LOG(FATAL) << "Failed to make temporary directory for data files";
     }
 
 #ifdef _WIN32
@@ -166,7 +197,7 @@
 // Builds flagfile flag in the flagfile_flag buffer and returns it. This
 // function also creates a temporary flagfile based on FlagfileData input.
 // We create a flagfile in a temporary directory with the name specified in
-// FlagfileData and populate it with lines specifed in FlagfileData. If $0 is
+// FlagfileData and populate it with lines specified in FlagfileData. If $0 is
 // referenced in any of the lines in FlagfileData they are replaced with
 // temporary directory location. This way we can test inclusion of one flagfile
 // from another flagfile.
@@ -204,9 +235,14 @@
 namespace {
 
 namespace flags = absl::flags_internal;
+using testing::AllOf;
 using testing::ElementsAreArray;
+using testing::HasSubstr;
 
 class ParseTest : public testing::Test {
+ public:
+  ~ParseTest() override { flags::SetFlagsHelpMode(flags::HelpMode::kNone); }
+
  private:
   absl::FlagSaver flag_saver_;
 };
@@ -214,6 +250,38 @@
 // --------------------------------------------------------------------
 
 template <int N>
+flags::HelpMode InvokeParseAbslOnlyImpl(const char* (&in_argv)[N]) {
+  std::vector<char*> positional_args;
+  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+  return flags::ParseAbseilFlagsOnlyImpl(N, const_cast<char**>(in_argv),
+                                         positional_args, unrecognized_flags,
+                                         flags::UsageFlagsAction::kHandleUsage);
+}
+
+// --------------------------------------------------------------------
+
+template <int N>
+void InvokeParseAbslOnly(const char* (&in_argv)[N]) {
+  std::vector<char*> positional_args;
+  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+  absl::ParseAbseilFlagsOnly(2, const_cast<char**>(in_argv), positional_args,
+                             unrecognized_flags);
+}
+
+// --------------------------------------------------------------------
+
+template <int N>
+std::vector<char*> InvokeParseCommandLineImpl(const char* (&in_argv)[N]) {
+  return flags::ParseCommandLineImpl(
+      N, const_cast<char**>(in_argv), flags::UsageFlagsAction::kHandleUsage,
+      flags::OnUndefinedFlag::kAbortIfUndefined, std::cerr);
+}
+
+// --------------------------------------------------------------------
+
+template <int N>
 std::vector<char*> InvokeParse(const char* (&in_argv)[N]) {
   return absl::ParseCommandLine(N, const_cast<char**>(in_argv));
 }
@@ -560,6 +628,49 @@
 
 // --------------------------------------------------------------------
 
+TEST_F(ParseDeathTest, TestFlagSuggestions) {
+  const char* in_args1[] = {
+      "testbin",
+      "--legacy_boo",
+  };
+  EXPECT_DEATH_IF_SUPPORTED(
+      InvokeParse(in_args1),
+      "Unknown command line flag 'legacy_boo'. Did you mean: legacy_bool ?");
+
+  const char* in_args2[] = {"testbin", "--foo", "--undefok=foo1"};
+  EXPECT_DEATH_IF_SUPPORTED(
+      InvokeParse(in_args2),
+      "Unknown command line flag 'foo'. Did you mean: foo1 \\(undefok\\)?");
+
+  const char* in_args3[] = {
+      "testbin",
+      "--nolegacy_ino",
+  };
+  EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3),
+                            "Unknown command line flag 'nolegacy_ino'. Did "
+                            "you mean: nolegacy_bool, legacy_int ?");
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, GetHints) {
+  EXPECT_THAT(absl::flags_internal::GetMisspellingHints("legacy_boo"),
+              testing::ContainerEq(std::vector<std::string>{"legacy_bool"}));
+  EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_itn"),
+              testing::ContainerEq(std::vector<std::string>{"legacy_int"}));
+  EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_int1"),
+              testing::ContainerEq(std::vector<std::string>{"legacy_int"}));
+  EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_int"),
+              testing::ContainerEq(std::vector<std::string>{"legacy_int"}));
+  EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_ino"),
+              testing::ContainerEq(
+                  std::vector<std::string>{"nolegacy_bool", "legacy_int"}));
+  EXPECT_THAT(
+      absl::flags_internal::GetMisspellingHints("FLAG_HEADER_000").size(), 100);
+}
+
+// --------------------------------------------------------------------
+
 TEST_F(ParseTest, TestLegacyFlags) {
   const char* in_args1[] = {
       "testbin",
@@ -775,88 +886,13 @@
 
 // --------------------------------------------------------------------
 
-TEST_F(ParseTest, TestKeepParsedArgs) {
-  const char* in_args1[] = {
-      "testbin",        "arg1", "--bool_flag",
-      "--int_flag=211", "arg2", "--double_flag=1.1",
-      "--string_flag",  "asd",  "--",
-      "arg3",           "arg4",
-  };
-
-  auto out_args1 = InvokeParse(in_args1);
-
-  EXPECT_THAT(
-      out_args1,
-      ElementsAreArray({absl::string_view("testbin"), absl::string_view("arg1"),
-                        absl::string_view("arg2"), absl::string_view("arg3"),
-                        absl::string_view("arg4")}));
-
-  auto out_args2 = flags::ParseCommandLineImpl(
-      11, const_cast<char**>(in_args1), flags::ArgvListAction::kKeepParsedArgs,
-      flags::UsageFlagsAction::kHandleUsage,
-      flags::OnUndefinedFlag::kAbortIfUndefined);
-
-  EXPECT_THAT(
-      out_args2,
-      ElementsAreArray({absl::string_view("testbin"),
-                        absl::string_view("--bool_flag"),
-                        absl::string_view("--int_flag=211"),
-                        absl::string_view("--double_flag=1.1"),
-                        absl::string_view("--string_flag"),
-                        absl::string_view("asd"), absl::string_view("--"),
-                        absl::string_view("arg1"), absl::string_view("arg2"),
-                        absl::string_view("arg3"), absl::string_view("arg4")}));
-}
-
-// --------------------------------------------------------------------
-
-TEST_F(ParseTest, TestIgnoreUndefinedFlags) {
-  const char* in_args1[] = {
-      "testbin",
-      "arg1",
-      "--undef_flag=aa",
-      "--int_flag=21",
-  };
-
-  auto out_args1 = flags::ParseCommandLineImpl(
-      4, const_cast<char**>(in_args1), flags::ArgvListAction::kRemoveParsedArgs,
-      flags::UsageFlagsAction::kHandleUsage,
-      flags::OnUndefinedFlag::kIgnoreUndefined);
-
-  EXPECT_THAT(out_args1, ElementsAreArray({absl::string_view("testbin"),
-                                           absl::string_view("arg1")}));
-
-  EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 21);
-
-  const char* in_args2[] = {
-      "testbin",
-      "arg1",
-      "--undef_flag=aa",
-      "--string_flag=AA",
-  };
-
-  auto out_args2 = flags::ParseCommandLineImpl(
-      4, const_cast<char**>(in_args2), flags::ArgvListAction::kKeepParsedArgs,
-      flags::UsageFlagsAction::kHandleUsage,
-      flags::OnUndefinedFlag::kIgnoreUndefined);
-
-  EXPECT_THAT(
-      out_args2,
-      ElementsAreArray(
-          {absl::string_view("testbin"), absl::string_view("--undef_flag=aa"),
-           absl::string_view("--string_flag=AA"), absl::string_view("arg1")}));
-
-  EXPECT_EQ(absl::GetFlag(FLAGS_string_flag), "AA");
-}
-
-// --------------------------------------------------------------------
-
-TEST_F(ParseDeathTest, TestHelpFlagHandling) {
+TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) {
   const char* in_args1[] = {
       "testbin",
       "--help",
   };
 
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kImportant);
   EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(1), "");
 
   const char* in_args2[] = {
@@ -865,12 +901,51 @@
       "--int_flag=3",
   };
 
-  auto out_args2 = flags::ParseCommandLineImpl(
-      3, const_cast<char**>(in_args2), flags::ArgvListAction::kRemoveParsedArgs,
-      flags::UsageFlagsAction::kIgnoreUsage,
-      flags::OnUndefinedFlag::kAbortIfUndefined);
-
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kImportant);
   EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 3);
+
+  const char* in_args3[] = {"testbin", "--help", "some_positional_arg"};
+
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args3), flags::HelpMode::kImportant);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, TestSubstringHelpFlagHandling) {
+  const char* in_args1[] = {
+      "testbin",
+      "--help=abcd",
+  };
+
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kMatch);
+  EXPECT_EQ(flags::GetFlagsHelpMatchSubstr(), "abcd");
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseDeathTest, TestVersionHandling) {
+  const char* in_args1[] = {
+      "testbin",
+      "--version",
+  };
+
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kVersion);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, TestCheckArgsHandling) {
+  const char* in_args1[] = {"testbin", "--only_check_args", "--int_flag=211"};
+
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kOnlyCheckArgs);
+  EXPECT_EXIT(InvokeParseAbslOnly(in_args1), testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(0), "");
+
+  const char* in_args2[] = {"testbin", "--only_check_args", "--unknown_flag=a"};
+
+  EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kOnlyCheckArgs);
+  EXPECT_EXIT(InvokeParseAbslOnly(in_args2), testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(InvokeParse(in_args2), testing::ExitedWithCode(1), "");
 }
 
 // --------------------------------------------------------------------
@@ -895,4 +970,118 @@
 
 // --------------------------------------------------------------------
 
+TEST_F(ParseTest, ParseAbseilFlagsOnlySuccess) {
+  const char* in_args[] = {
+      "testbin",
+      "arg1",
+      "--bool_flag",
+      "--int_flag=211",
+      "arg2",
+      "--double_flag=1.1",
+      "--undef_flag1",
+      "--undef_flag2=123",
+      "--string_flag",
+      "asd",
+      "--",
+      "--some_flag",
+      "arg4",
+  };
+
+  std::vector<char*> positional_args;
+  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+  absl::ParseAbseilFlagsOnly(13, const_cast<char**>(in_args), positional_args,
+                             unrecognized_flags);
+  EXPECT_THAT(positional_args,
+              ElementsAreArray(
+                  {absl::string_view("testbin"), absl::string_view("arg1"),
+                   absl::string_view("arg2"), absl::string_view("--some_flag"),
+                   absl::string_view("arg4")}));
+  EXPECT_THAT(unrecognized_flags,
+              ElementsAreArray(
+                  {absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+                                          "undef_flag1"),
+                   absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+                                          "undef_flag2")}));
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseDeathTest, ParseAbseilFlagsOnlyFailure) {
+  const char* in_args[] = {
+      "testbin",
+      "--int_flag=21.1",
+  };
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      InvokeParseAbslOnly(in_args),
+      "Illegal value '21.1' specified for flag 'int_flag'");
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, UndefOkFlagsAreIgnored) {
+  const char* in_args[] = {
+      "testbin",           "--undef_flag1",
+      "--undef_flag2=123", "--undefok=undef_flag2",
+      "--undef_flag3",     "value",
+  };
+
+  std::vector<char*> positional_args;
+  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+  absl::ParseAbseilFlagsOnly(6, const_cast<char**>(in_args), positional_args,
+                             unrecognized_flags);
+  EXPECT_THAT(positional_args, ElementsAreArray({absl::string_view("testbin"),
+                                                 absl::string_view("value")}));
+  EXPECT_THAT(unrecognized_flags,
+              ElementsAreArray(
+                  {absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+                                          "undef_flag1"),
+                   absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+                                          "undef_flag3")}));
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, AllUndefOkFlagsAreIgnored) {
+  const char* in_args[] = {
+      "testbin",
+      "--undef_flag1",
+      "--undef_flag2=123",
+      "--undefok=undef_flag2,undef_flag1,undef_flag3",
+      "--undef_flag3",
+      "value",
+      "--",
+      "--undef_flag4",
+  };
+
+  std::vector<char*> positional_args;
+  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+  absl::ParseAbseilFlagsOnly(8, const_cast<char**>(in_args), positional_args,
+                             unrecognized_flags);
+  EXPECT_THAT(positional_args,
+              ElementsAreArray({absl::string_view("testbin"),
+                                absl::string_view("value"),
+                                absl::string_view("--undef_flag4")}));
+  EXPECT_THAT(unrecognized_flags, testing::IsEmpty());
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseDeathTest, ExitOnUnrecognizedFlagPrintsHelp) {
+  const char* in_args[] = {
+      "testbin",
+      "--undef_flag1",
+      "--help=int_flag",
+  };
+
+  EXPECT_EXIT(InvokeParseCommandLineImpl(in_args), testing::ExitedWithCode(1),
+              AllOf(HasSubstr("Unknown command line flag 'undef_flag1'"),
+                    HasSubstr("Try --helpfull to get a list of all flags")));
+}
+
+// --------------------------------------------------------------------
+
 }  // namespace
diff --git a/abseil-cpp/absl/flags/reflection.cc b/abseil-cpp/absl/flags/reflection.cc
index d706022..dbce403 100644
--- a/abseil-cpp/absl/flags/reflection.cc
+++ b/abseil-cpp/absl/flags/reflection.cc
@@ -17,11 +17,12 @@
 
 #include <assert.h>
 
-#include <map>
+#include <atomic>
 #include <string>
 
 #include "absl/base/config.h"
 #include "absl/base/thread_annotations.h"
+#include "absl/container/flat_hash_map.h"
 #include "absl/flags/commandlineflag.h"
 #include "absl/flags/internal/private_handle_accessor.h"
 #include "absl/flags/internal/registry.h"
@@ -49,28 +50,30 @@
   ~FlagRegistry() = default;
 
   // Store a flag in this registry. Takes ownership of *flag.
-  void RegisterFlag(CommandLineFlag& flag);
+  void RegisterFlag(CommandLineFlag& flag, const char* filename);
 
   void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.Lock(); }
   void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.Unlock(); }
 
   // Returns the flag object for the specified name, or nullptr if not found.
   // Will emit a warning if a 'retired' flag is specified.
-  CommandLineFlag* FindFlagLocked(absl::string_view name);
+  CommandLineFlag* FindFlag(absl::string_view name);
 
   static FlagRegistry& GlobalRegistry();  // returns a singleton registry
 
  private:
   friend class flags_internal::FlagSaverImpl;  // reads all the flags in order
                                                // to copy them
-  friend void ForEachFlagUnlocked(
-      std::function<void(CommandLineFlag&)> visitor);
+  friend void ForEachFlag(std::function<void(CommandLineFlag&)> visitor);
+  friend void FinalizeRegistry();
 
-  // The map from name to flag, for FindFlagLocked().
-  using FlagMap = std::map<absl::string_view, CommandLineFlag*>;
+  // The map from name to flag, for FindFlag().
+  using FlagMap = absl::flat_hash_map<absl::string_view, CommandLineFlag*>;
   using FlagIterator = FlagMap::iterator;
   using FlagConstIterator = FlagMap::const_iterator;
   FlagMap flags_;
+  std::vector<CommandLineFlag*> flat_flags_;
+  std::atomic<bool> finalized_flags_{false};
 
   absl::Mutex lock_;
 
@@ -79,15 +82,6 @@
   FlagRegistry& operator=(const FlagRegistry&);
 };
 
-CommandLineFlag* FlagRegistry::FindFlagLocked(absl::string_view name) {
-  FlagConstIterator i = flags_.find(name);
-  if (i == flags_.end()) {
-    return nullptr;
-  }
-
-  return i->second;
-}
-
 namespace {
 
 class FlagRegistryLock {
@@ -101,8 +95,37 @@
 
 }  // namespace
 
-void FlagRegistry::RegisterFlag(CommandLineFlag& flag) {
+CommandLineFlag* FlagRegistry::FindFlag(absl::string_view name) {
+  if (finalized_flags_.load(std::memory_order_acquire)) {
+    // We could save some CPU cycles here if we made `Name()` non-virtual,
+    // for example by moving the `const char*` name to the base class.
+    auto it = std::partition_point(
+        flat_flags_.begin(), flat_flags_.end(),
+        [=](CommandLineFlag* f) { return f->Name() < name; });
+    if (it != flat_flags_.end() && (*it)->Name() == name) return *it;
+  }
+
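+  // Fall back to the locked map lookup; this is the lookup path until
+  // FinalizeRegistry() moves the registered flags into the sorted
+  // flat_flags_ vector and clears flags_.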
+  FlagRegistryLock frl(*this);
+  auto it = flags_.find(name);
+  return it != flags_.end() ? it->second : nullptr;
+}
+
+void FlagRegistry::RegisterFlag(CommandLineFlag& flag, const char* filename) {
+  if (filename != nullptr &&
+      flag.Filename() != GetUsageConfig().normalize_filename(filename)) {
+    flags_internal::ReportUsageError(
+        absl::StrCat(
+            "Inconsistency between flag object and registration for flag '",
+            flag.Name(),
+            "', likely due to duplicate flags or an ODR violation. Relevant "
+            "files: ",
+            flag.Filename(), " and ", filename),
+        true);
+    std::exit(1);
+  }
+
   FlagRegistryLock registry_lock(*this);
+
   std::pair<FlagIterator, bool> ins =
       flags_.insert(FlagMap::value_type(flag.Name(), &flag));
   if (ins.second == false) {  // means the name was already in the map
@@ -152,27 +175,43 @@
 
 // --------------------------------------------------------------------
 
-void ForEachFlagUnlocked(std::function<void(CommandLineFlag&)> visitor) {
-  FlagRegistry& registry = FlagRegistry::GlobalRegistry();
-  for (FlagRegistry::FlagConstIterator i = registry.flags_.begin();
-       i != registry.flags_.end(); ++i) {
-    visitor(*i->second);
-  }
-}
-
 void ForEachFlag(std::function<void(CommandLineFlag&)> visitor) {
   FlagRegistry& registry = FlagRegistry::GlobalRegistry();
+
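+  // After FinalizeRegistry() the flags live in the sorted flat_flags_ vector,
+  // which can be visited without taking the lock; the flags_ map below has
+  // been cleared by then.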
+  if (registry.finalized_flags_.load(std::memory_order_acquire)) {
+    for (const auto& i : registry.flat_flags_) visitor(*i);
+  }
+
   FlagRegistryLock frl(registry);
-  ForEachFlagUnlocked(visitor);
+  for (const auto& i : registry.flags_) visitor(*i.second);
 }
 
 // --------------------------------------------------------------------
 
-bool RegisterCommandLineFlag(CommandLineFlag& flag) {
-  FlagRegistry::GlobalRegistry().RegisterFlag(flag);
+bool RegisterCommandLineFlag(CommandLineFlag& flag, const char* filename) {
+  FlagRegistry::GlobalRegistry().RegisterFlag(flag, filename);
   return true;
 }
 
+void FinalizeRegistry() {
+  auto& registry = FlagRegistry::GlobalRegistry();
+  FlagRegistryLock frl(registry);
+  if (registry.finalized_flags_.load(std::memory_order_relaxed)) {
+    // Was already finalized. Ignore the second time.
+    return;
+  }
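+  // Build a flat vector of the registered flags, sorted by name, so that
+  // FindFlag() can do a lock-free binary search over it from now on.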
+  registry.flat_flags_.reserve(registry.flags_.size());
+  for (const auto& f : registry.flags_) {
+    registry.flat_flags_.push_back(f.second);
+  }
+  std::sort(std::begin(registry.flat_flags_), std::end(registry.flat_flags_),
+            [](const CommandLineFlag* lhs, const CommandLineFlag* rhs) {
+              return lhs->Name() < rhs->Name();
+            });
+  registry.flags_.clear();
+  registry.finalized_flags_.store(true, std::memory_order_release);
+}
+
 // --------------------------------------------------------------------
 
 namespace {
@@ -244,7 +283,7 @@
   static_assert(alignof(RetiredFlagObj) == kRetiredFlagObjAlignment, "");
   auto* flag = ::new (static_cast<void*>(buf))
       flags_internal::RetiredFlagObj(name, type_id);
-  FlagRegistry::GlobalRegistry().RegisterFlag(*flag);
+  FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr);
 }
 
 // --------------------------------------------------------------------
@@ -298,9 +337,7 @@
   if (name.empty()) return nullptr;
   flags_internal::FlagRegistry& registry =
       flags_internal::FlagRegistry::GlobalRegistry();
-  flags_internal::FlagRegistryLock frl(registry);
-
-  return registry.FindFlagLocked(name);
+  return registry.FindFlag(name);
 }
 
 // --------------------------------------------------------------------
@@ -308,7 +345,7 @@
 absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> GetAllFlags() {
   absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> res;
   flags_internal::ForEachFlag([&](CommandLineFlag& flag) {
-    res.insert({flag.Name(), &flag});
+    if (!flag.IsRetired()) res.insert({flag.Name(), &flag});
   });
   return res;
 }
diff --git a/abseil-cpp/absl/flags/reflection.h b/abseil-cpp/absl/flags/reflection.h
index 4ce0ab6..e6baf5d 100644
--- a/abseil-cpp/absl/flags/reflection.h
+++ b/abseil-cpp/absl/flags/reflection.h
@@ -64,7 +64,7 @@
 //   void MyFunc() {
 //    absl::FlagSaver fs;
 //    ...
-//    absl::SetFlag(FLAGS_myFlag, otherValue);
+//    absl::SetFlag(&FLAGS_myFlag, otherValue);
 //    ...
 //  } // scope of FlagSaver left, flags return to previous state
 //
diff --git a/abseil-cpp/absl/flags/reflection_test.cc b/abseil-cpp/absl/flags/reflection_test.cc
index 1a1dcb4..79cfa90 100644
--- a/abseil-cpp/absl/flags/reflection_test.cc
+++ b/abseil-cpp/absl/flags/reflection_test.cc
@@ -32,12 +32,8 @@
 ABSL_FLAG(std::string, string_flag, "dflt", "string_flag help");
 ABSL_RETIRED_FLAG(bool, bool_retired_flag, false, "bool_retired_flag help");
 
-ABSL_DECLARE_FLAG(bool, help);
-
 namespace {
 
-namespace flags = absl::flags_internal;
-
 class ReflectionTest : public testing::Test {
  protected:
   void SetUp() override { flag_saver_ = absl::make_unique<absl::FlagSaver>(); }
@@ -66,12 +62,9 @@
 // --------------------------------------------------------------------
 
 TEST_F(ReflectionTest, TestGetAllFlags) {
-  (void)absl::GetFlag(FLAGS_help);  // Force linking of usage flags.
-
   auto all_flags = absl::GetAllFlags();
   EXPECT_NE(all_flags.find("int_flag"), all_flags.end());
-  EXPECT_NE(all_flags.find("bool_retired_flag"), all_flags.end());
-  EXPECT_NE(all_flags.find("help"), all_flags.end());
+  EXPECT_EQ(all_flags.find("bool_retired_flag"), all_flags.end());
   EXPECT_EQ(all_flags.find("some_undefined_flag"), all_flags.end());
 
   std::vector<absl::string_view> flag_names_first_attempt;
diff --git a/abseil-cpp/absl/flags/usage.cc b/abseil-cpp/absl/flags/usage.cc
index 452f667..267a503 100644
--- a/abseil-cpp/absl/flags/usage.cc
+++ b/abseil-cpp/absl/flags/usage.cc
@@ -21,6 +21,7 @@
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/const_init.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/thread_annotations.h"
 #include "absl/flags/internal/usage.h"
 #include "absl/strings/string_view.h"
diff --git a/abseil-cpp/absl/flags/usage_config.cc b/abseil-cpp/absl/flags/usage_config.cc
index ae2f548..5d7426d 100644
--- a/abseil-cpp/absl/flags/usage_config.cc
+++ b/abseil-cpp/absl/flags/usage_config.cc
@@ -34,7 +34,8 @@
 
 // Additional report of fatal usage error message before we std::exit. Error is
 // fatal if is_fatal argument to ReportUsageError is true.
-ABSL_ATTRIBUTE_WEAK void AbslInternalReportFatalUsageError(absl::string_view) {}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalReportFatalUsageError)(absl::string_view) {}
 
 }  // extern "C"
 
@@ -128,7 +129,7 @@
   std::cerr << "ERROR: " << msg << std::endl;
 
   if (is_fatal) {
-    AbslInternalReportFatalUsageError(msg);
+    ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(msg);
   }
 }
 
diff --git a/abseil-cpp/absl/flags/usage_config.h b/abseil-cpp/absl/flags/usage_config.h
index 96eecea..ded7030 100644
--- a/abseil-cpp/absl/flags/usage_config.h
+++ b/abseil-cpp/absl/flags/usage_config.h
@@ -127,7 +127,8 @@
 
 // Additional report of fatal usage error message before we std::exit. Error is
 // fatal if is_fatal argument to ReportUsageError is true.
-void AbslInternalReportFatalUsageError(absl::string_view);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(
+    absl::string_view);
 
 }  // extern "C"
 
diff --git a/abseil-cpp/absl/functional/BUILD.bazel b/abseil-cpp/absl/functional/BUILD.bazel
index ebd9b99..4ceac53 100644
--- a/abseil-cpp/absl/functional/BUILD.bazel
+++ b/abseil-cpp/absl/functional/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -27,6 +26,40 @@
 licenses(["notice"])
 
 cc_library(
+    name = "any_invocable",
+    srcs = ["internal/any_invocable.h"],
+    hdrs = ["any_invocable.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:base_internal",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/meta:type_traits",
+        "//absl/utility",
+    ],
+)
+
+cc_test(
+    name = "any_invocable_test",
+    srcs = [
+        "any_invocable_test.cc",
+        "internal/any_invocable.h",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":any_invocable",
+        "//absl/base:base_internal",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/meta:type_traits",
+        "//absl/utility",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
     name = "bind_front",
     srcs = ["internal/front_binder.h"],
     hdrs = ["bind_front.h"],
@@ -59,7 +92,9 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":any_invocable",
         "//absl/base:base_internal",
+        "//absl/base:core_headers",
         "//absl/meta:type_traits",
     ],
 )
@@ -70,6 +105,7 @@
     srcs = ["function_ref_test.cc"],
     copts = ABSL_TEST_COPTS,
     deps = [
+        ":any_invocable",
         ":function_ref",
         "//absl/container:test_instance_tracker",
         "//absl/memory",
@@ -78,14 +114,15 @@
 )
 
 cc_test(
-    name = "function_ref_benchmark",
+    name = "function_type_benchmark",
     srcs = [
-        "function_ref_benchmark.cc",
+        "function_type_benchmark.cc",
     ],
     copts = ABSL_TEST_COPTS,
     tags = ["benchmark"],
     visibility = ["//visibility:private"],
     deps = [
+        ":any_invocable",
         ":function_ref",
         "//absl/base:core_headers",
         "@com_github_google_benchmark//:benchmark_main",
diff --git a/abseil-cpp/absl/functional/CMakeLists.txt b/abseil-cpp/absl/functional/CMakeLists.txt
index cda914f..c704e04 100644
--- a/abseil-cpp/absl/functional/CMakeLists.txt
+++ b/abseil-cpp/absl/functional/CMakeLists.txt
@@ -16,6 +16,42 @@
 
 absl_cc_library(
   NAME
+    any_invocable
+  SRCS
+    "internal/any_invocable.h"
+  HDRS
+    "any_invocable.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::type_traits
+    absl::utility
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    any_invocable_test
+  SRCS
+    "any_invocable_test.cc"
+    "internal/any_invocable.h"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::any_invocable
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::type_traits
+    absl::utility
+    GTest::gmock_main
+)
+
+absl_cc_library(
+  NAME
     bind_front
   SRCS
     "internal/front_binder.h"
@@ -39,7 +75,7 @@
   DEPS
     absl::bind_front
     absl::memory
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -53,6 +89,8 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::base_internal
+    absl::core_headers
+    absl::any_invocable
     absl::meta
   PUBLIC
 )
@@ -68,5 +106,5 @@
     absl::function_ref
     absl::memory
     absl::test_instance_tracker
-    gmock_main
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/functional/any_invocable.h b/abseil-cpp/absl/functional/any_invocable.h
new file mode 100644
index 0000000..68d8825
--- /dev/null
+++ b/abseil-cpp/absl/functional/any_invocable.h
@@ -0,0 +1,324 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: any_invocable.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines an `absl::AnyInvocable` type that assumes ownership
+// and wraps an object of an invocable type. (Invocable types adhere to the
+// concept specified in https://en.cppreference.com/w/cpp/concepts/invocable.)
+//
+// In general, prefer `absl::AnyInvocable` when you need a type-erased
+// function parameter that needs to take ownership of the type.
+//
+// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function`
+// abstraction, but has a slightly different API and is not designed to be a
+// drop-in replacement or C++11-compatible backfill of that type.
+//
+// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original
+// implementation.
+
+#ifndef ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
+#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
+
+#include <cstddef>
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/functional/internal/any_invocable.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::AnyInvocable
+//
+// `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that
+// assumes ownership of an invocable object. Unlike `std::function`, an
+// `absl::AnyInvocable` is more type-safe and provides the following additional
+// benefits:
+//
+// * Properly adheres to const correctness of the underlying type
+// * Is move-only, so it avoids concurrency problems with copied invocables
+//   and unnecessary copies in general.
+// * Supports reference qualifiers allowing it to perform unique actions (noted
+//   below).
+//
+// `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation
+// may wrap any invocable object with a compatible function signature, e.g.
+// having arguments and return types convertible to types matching the
+// `absl::AnyInvocable` signature, and also matching any stated reference
+// qualifiers, as long as that type is moveable. It therefore provides broad
+// type erasure for functional objects.
+//
+// An `absl::AnyInvocable` is typically used as a type-erased function parameter
+// for accepting various functional objects:
+//
+// // Define a function taking an AnyInvocable parameter.
+// void my_func(absl::AnyInvocable<int()> f) {
+//   ...
+// };
+//
+// // That function can accept any invocable type:
+//
+// // Accept a function reference. We don't need to move a reference.
+// int func1() { return 0; };
+// my_func(func1);
+//
+// // Accept a lambda. We use std::move here because otherwise my_func would
+// // copy the lambda.
+// auto lambda = []() { return 0; };
+// my_func(std::move(lambda));
+//
+// // Accept a function pointer. We don't need to move a function pointer.
+// int (*func2)() = &func1;
+// my_func(func2);
+//
+// // Accept an std::function by moving it. Note that the lambda is copyable
+// // (satisfying std::function requirements) and moveable (satisfying
+// // absl::AnyInvocable requirements).
+// std::function<int()> func6 = []() { return 0; };
+// my_func(std::move(func6));
+//
+// `AnyInvocable` also properly respects `const` qualifiers, reference
+// qualifiers, and the `noexcept` specification (only in C++17 and beyond) as
+// part of the user-specified function type (e.g.
+// `AnyInvocable<void() const && noexcept>`). These qualifiers will be applied to
+// the `AnyInvocable` object's `operator()`, and the underlying invocable must
+// be compatible with those qualifiers.
+//
+// Comparison of const and non-const function types:
+//
+//   // Store a closure inside of `func` with the function type `int()`.
+//   // Note that we have made `func` itself `const`.
+//   const AnyInvocable<int()> func = [](){ return 0; };
+//
+//   func();  // Compile-error: the passed type `int()` isn't `const`.
+//
+//   // Store a closure inside of `const_func` with the function type
+//   // `int() const`.
+//   // Note that we have also made `const_func` itself `const`.
+//   const AnyInvocable<int() const> const_func = [](){ return 0; };
+//
+//   const_func();  // Fine: `int() const` is `const`.
+//
+// In the above example, the call `func()` would have compiled if
+// `std::function` were used even though the types are not const compatible.
+// This is a bug, and using `absl::AnyInvocable` properly detects that bug.
+//
+// In addition to affecting the signature of `operator()`, the `const` and
+// reference qualifiers of the function type also appropriately constrain which
+// kinds of invocable objects you are allowed to place into the `AnyInvocable`
+// instance. If you specify a function type that is const-qualified, then
+// anything that you attempt to put into the `AnyInvocable` must be callable on
+// a `const` instance of that type.
+//
+// Constraint example:
+//
+//   // Fine because the lambda is callable when `const`.
+//   AnyInvocable<int() const> func = [=](){ return 0; };
+//
+//   // This is a compile-error because the lambda isn't callable when `const`.
+//   AnyInvocable<int() const> error = [=]() mutable { return 0; };
+//
+// An `&&` qualifier can be used to express that an `absl::AnyInvocable`
+// instance should be invoked at most once:
+//
+//   // Invokes `continuation` with the logical result of an operation when
+//   // that operation completes (common in asynchronous code).
+//   void CallOnCompletion(AnyInvocable<void(int)&&> continuation) {
+//     int result_of_foo = foo();
+//
+//     // `std::move` is required because the `operator()` of `continuation` is
+//     // rvalue-reference qualified.
+//     std::move(continuation)(result_of_foo);
+//   }
+//
+// Attempting to call `absl::AnyInvocable` multiple times in such a case
+// results in undefined behavior.
+template <class Sig>
+class AnyInvocable : private internal_any_invocable::Impl<Sig> {
+ private:
+  static_assert(
+      std::is_function<Sig>::value,
+      "The template argument of AnyInvocable must be a function type.");
+
+  using Impl = internal_any_invocable::Impl<Sig>;
+
+ public:
+  // The return type of Sig
+  using result_type = typename Impl::result_type;
+
+  // Constructors
+
+  // Constructs the `AnyInvocable` in an empty state.
+  AnyInvocable() noexcept = default;
+  AnyInvocable(std::nullptr_t) noexcept {}  // NOLINT
+
+  // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move.
+  // Note that `f` is not guaranteed to be empty after move-construction,
+  // although it may be.
+  AnyInvocable(AnyInvocable&& /*f*/) noexcept = default;
+
+  // Constructs an `AnyInvocable` from an invocable object.
+  //
+  // Upon construction, `*this` is only empty if `f` is a function pointer or
+  // member pointer type and is null, or if `f` is an `AnyInvocable` that is
+  // empty.
+  template <class F, typename = absl::enable_if_t<
+                         internal_any_invocable::CanConvert<Sig, F>::value>>
+  AnyInvocable(F&& f)  // NOLINT
+      : Impl(internal_any_invocable::ConversionConstruct(),
+             std::forward<F>(f)) {}
+
+  // Constructs an `AnyInvocable` that holds an invocable object of type `T`,
+  // which is constructed in-place from the given arguments.
+  //
+  // Example:
+  //
+  //   AnyInvocable<int(int)> func(
+  //       absl::in_place_type<PossiblyImmovableType>, arg1, arg2);
+  //
+  template <class T, class... Args,
+            typename = absl::enable_if_t<
+                internal_any_invocable::CanEmplace<Sig, T, Args...>::value>>
+  explicit AnyInvocable(absl::in_place_type_t<T>, Args&&... args)
+      : Impl(absl::in_place_type<absl::decay_t<T>>,
+             std::forward<Args>(args)...) {
+    static_assert(std::is_same<T, absl::decay_t<T>>::value,
+                  "The explicit template argument of in_place_type is required "
+                  "to be an unqualified object type.");
+  }
+
+  // Overload of the above constructor to support list-initialization.
+  template <class T, class U, class... Args,
+            typename = absl::enable_if_t<internal_any_invocable::CanEmplace<
+                Sig, T, std::initializer_list<U>&, Args...>::value>>
+  explicit AnyInvocable(absl::in_place_type_t<T>,
+                        std::initializer_list<U> ilist, Args&&... args)
+      : Impl(absl::in_place_type<absl::decay_t<T>>, ilist,
+             std::forward<Args>(args)...) {
+    static_assert(std::is_same<T, absl::decay_t<T>>::value,
+                  "The explicit template argument of in_place_type is required "
+                  "to be an unqualified object type.");
+  }
+
+  // Assignment Operators
+
+  // Assigns an `AnyInvocable` through move-assignment.
+  // Note that `f` is not guaranteed to be empty after move-assignment
+  // although it may be.
+  AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default;
+
+  // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If
+  // not empty, destroys the target, putting `*this` into an empty state.
+  AnyInvocable& operator=(std::nullptr_t) noexcept {
+    this->Clear();
+    return *this;
+  }
+
+  // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance.
+  //
+  // Upon assignment, `*this` is only empty if `f` is a function pointer or
+  // member pointer type and is null, or if `f` is an `AnyInvocable` that is
+  // empty.
+  template <class F, typename = absl::enable_if_t<
+                         internal_any_invocable::CanAssign<Sig, F>::value>>
+  AnyInvocable& operator=(F&& f) {
+    *this = AnyInvocable(std::forward<F>(f));
+    return *this;
+  }
+
+  // Assigns an `AnyInvocable` from a reference to an invocable object.
+  // Upon assignment, stores a reference to the invocable object in the
+  // `AnyInvocable` instance.
+  template <
+      class F,
+      typename = absl::enable_if_t<
+          internal_any_invocable::CanAssignReferenceWrapper<Sig, F>::value>>
+  AnyInvocable& operator=(std::reference_wrapper<F> f) noexcept {
+    *this = AnyInvocable(f);
+    return *this;
+  }
+
+  // Destructor
+
+  // If not empty, destroys the target.
+  ~AnyInvocable() = default;
+
+  // absl::AnyInvocable::swap()
+  //
+  // Exchanges the targets of `*this` and `other`.
+  void swap(AnyInvocable& other) noexcept { std::swap(*this, other); }
+
+  // absl::AnyInvocable::operator bool()
+  //
+  // Returns `true` if `*this` is not empty.
+  //
+  // WARNING: An `AnyInvocable` that wraps an empty `std::function` is not
+  // itself empty. This behavior is consistent with the standard equivalent
+  // `std::move_only_function`.
+  //
+  // In other words:
+  //   std::function<void()> f;  // empty
+  //   absl::AnyInvocable<void()> a = std::move(f);  // not empty
+  explicit operator bool() const noexcept { return this->HasValue(); }
+
+  // Invokes the target object of `*this`. `*this` must not be empty.
+  //
+  // Note: The signature of this function call operator is the same as the
+  //       template parameter `Sig`.
+  using Impl::operator();
+
+  // Equality operators
+
+  // Returns `true` if `*this` is empty.
+  friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept {
+    return !f.HasValue();
+  }
+
+  // Returns `true` if `*this` is empty.
+  friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept {
+    return !f.HasValue();
+  }
+
+  // Returns `false` if `*this` is empty.
+  friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept {
+    return f.HasValue();
+  }
+
+  // Returns `false` if `*this` is empty.
+  friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept {
+    return f.HasValue();
+  }
+
+  // swap()
+  //
+  // Exchanges the targets of `f1` and `f2`.
+  friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept { f1.swap(f2); }
+
+ private:
+  // Friending other instantiations is necessary for conversions.
+  template <bool /*SigIsNoexcept*/, class /*ReturnType*/, class... /*P*/>
+  friend class internal_any_invocable::CoreImpl;
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
diff --git a/abseil-cpp/absl/functional/any_invocable_test.cc b/abseil-cpp/absl/functional/any_invocable_test.cc
new file mode 100644
index 0000000..a740faa
--- /dev/null
+++ b/abseil-cpp/absl/functional/any_invocable_test.cc
@@ -0,0 +1,1719 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/functional/any_invocable.h"
+
+#include <cstddef>
+#include <initializer_list>
+#include <memory>
+#include <numeric>
+#include <type_traits>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+static_assert(absl::internal_any_invocable::kStorageSize >= sizeof(void*),
+              "These tests assume that the small object storage is at least "
+              "the size of a pointer.");
+
+namespace {
+
+// Helper macro used to avoid spelling `noexcept` in language versions older
+// than C++17, where it is not part of the type system, in order to avoid
+// compilation failures and internal compiler errors.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
+#else
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
+#endif
+
+// A dummy type we use when passing qualifiers to metafunctions
+struct _ {};
+
+template <class T>
+struct Wrapper {
+  template <class U,
+            class = absl::enable_if_t<std::is_convertible<U, T>::value>>
+  Wrapper(U&&);  // NOLINT
+};
+
+// This will cause a recursive trait instantiation if the SFINAE checks are
+// not ordered correctly for constructibility.
+static_assert(std::is_constructible<Wrapper<absl::AnyInvocable<void()>>,
+                                    Wrapper<absl::AnyInvocable<void()>>>::value,
+              "");
+
+// A metafunction that takes the cv and l-value reference qualifiers that were
+// associated with a function type (here passed via qualifiers of an object
+// type), and applies them to a given object type to form the reference type
+// used for the implicit object parameter.
+template <class Qualifiers, class This>
+struct QualifiersForThisImpl {
+  static_assert(std::is_object<This>::value, "");
+  using type =
+      absl::conditional_t<std::is_const<Qualifiers>::value, const This, This>&;
+};
+
+template <class Qualifiers, class This>
+struct QualifiersForThisImpl<Qualifiers&, This>
+    : QualifiersForThisImpl<Qualifiers, This> {};
+
+template <class Qualifiers, class This>
+struct QualifiersForThisImpl<Qualifiers&&, This> {
+  static_assert(std::is_object<This>::value, "");
+  using type =
+      absl::conditional_t<std::is_const<Qualifiers>::value, const This, This>&&;
+};
+
+template <class Qualifiers, class This>
+using QualifiersForThis =
+    typename QualifiersForThisImpl<Qualifiers, This>::type;
+
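+// For instance (illustrative): `QualifiersForThis<const _&, int>` is
+// `const int&`, and `QualifiersForThis<_&&, int>` is `int&&`.
+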
+// A metafunction that takes the cv and l-value reference qualifier of T and
+// applies them to U's function type qualifiers.
+template <class T, class Fun>
+struct GiveQualifiersToFunImpl;
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T, R(P...)> {
+  using type =
+      absl::conditional_t<std::is_const<T>::value, R(P...) const, R(P...)>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&, R(P...)> {
+  using type =
+      absl::conditional_t<std::is_const<T>::value, R(P...) const&, R(P...)&>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&&, R(P...)> {
+  using type =
+      absl::conditional_t<std::is_const<T>::value, R(P...) const&&, R(P...) &&>;
+};
+
+// If noexcept is a part of the type system, then provide the noexcept forms.
+#if defined(__cpp_noexcept_function_type)
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T, R(P...) noexcept> {
+  using type = absl::conditional_t<std::is_const<T>::value,
+                                   R(P...) const noexcept, R(P...) noexcept>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&, R(P...) noexcept> {
+  using type =
+      absl::conditional_t<std::is_const<T>::value, R(P...) const & noexcept,
+                          R(P...) & noexcept>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&&, R(P...) noexcept> {
+  using type =
+      absl::conditional_t<std::is_const<T>::value, R(P...) const && noexcept,
+                          R(P...) && noexcept>;
+};
+
+#endif  // defined(__cpp_noexcept_function_type)
+
+template <class T, class Fun>
+using GiveQualifiersToFun = typename GiveQualifiersToFunImpl<T, Fun>::type;
+
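+// For instance (illustrative): `GiveQualifiersToFun<const _&, int(float)>` is
+// `int(float) const&`, and `GiveQualifiersToFun<_&&, int()>` is `int() &&`.
+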
+// This is used in template parameters to decide whether or not to use a type
+// that fits in the small object optimization storage.
+enum class ObjSize { small, large };
+
+// A base type that is used with classes as a means to insert an
+// appropriately-sized dummy datamember when Size is ObjSize::large so that the
+// user's class type is guaranteed to not fit in small object storage.
+template <ObjSize Size>
+struct TypeErasedPadding;
+
+template <>
+struct TypeErasedPadding<ObjSize::small> {};
+
+template <>
+struct TypeErasedPadding<ObjSize::large> {
+  char dummy_data[absl::internal_any_invocable::kStorageSize + 1] = {};
+};
+
+struct Int {
+  Int(int v) noexcept : value(v) {}  // NOLINT
+#ifndef _MSC_VER
+  Int(Int&&) noexcept {
+    // NOTE: Prior to C++17, this not being called requires optimizations to
+    //       take place when performing the top-level invocation. In practice,
+    //       most supported compilers perform this optimization prior to C++17.
+    std::abort();
+  }
+#else
+  Int(Int&& v) noexcept = default;
+#endif
+  operator int() && noexcept { return value; }  // NOLINT
+
+  int MemberFunctionAdd(int const& b, int c) noexcept {  // NOLINT
+    return value + b + c;
+  }
+
+  int value;
+};
+
+enum class Movable { no, yes, nothrow, trivial };
+
+enum class NothrowCall { no, yes };
+
+enum class Destructible { nothrow, trivial };
+
+enum class ObjAlign : std::size_t {
+  normal = absl::internal_any_invocable::kAlignment,
+  large = absl::internal_any_invocable::kAlignment * 2,
+};
+
+// A function-object template that has knobs for each property that can affect
+// how the object is stored in AnyInvocable.
+template <Movable Movability, Destructible Destructibility, class Qual,
+          NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add;
+
+#define ABSL_INTERNALS_ADD(qual)                                              \
+  template <NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>  \
+  struct alignas(static_cast<std::size_t>(Alignment))                         \
+      add<Movable::trivial, Destructible::trivial, _ qual, CallExceptionSpec, \
+          Size, Alignment> : TypeErasedPadding<Size> {                        \
+    explicit add(int state_init) : state(state_init) {}                       \
+    explicit add(std::initializer_list<int> state_init, int tail)             \
+        : state(std::accumulate(std::begin(state_init), std::end(state_init), \
+                                0) +                                          \
+                tail) {}                                                      \
+    add(add&& other) = default; /*NOLINT*/                                    \
+    Int operator()(int a, int b, int c) qual                                  \
+        ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) {  \
+      return state + a + b + c;                                               \
+    }                                                                         \
+    int state;                                                                \
+  };                                                                          \
+                                                                              \
+  template <NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>  \
+  struct alignas(static_cast<std::size_t>(Alignment))                         \
+      add<Movable::trivial, Destructible::nothrow, _ qual, CallExceptionSpec, \
+          Size, Alignment> : TypeErasedPadding<Size> {                        \
+    explicit add(int state_init) : state(state_init) {}                       \
+    explicit add(std::initializer_list<int> state_init, int tail)             \
+        : state(std::accumulate(std::begin(state_init), std::end(state_init), \
+                                0) +                                          \
+                tail) {}                                                      \
+    ~add() noexcept {}                                                        \
+    add(add&& other) = default; /*NOLINT*/                                    \
+    Int operator()(int a, int b, int c) qual                                  \
+        ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) {  \
+      return state + a + b + c;                                               \
+    }                                                                         \
+    int state;                                                                \
+  }
+
+// Explicitly specify an empty argument.
+// MSVC (at least up to _MSC_VER 1931, if not beyond) warns that
+// ABSL_INTERNALS_ADD() is an undefined zero-arg overload.
+#define ABSL_INTERNALS_NOARG
+ABSL_INTERNALS_ADD(ABSL_INTERNALS_NOARG);
+#undef ABSL_INTERNALS_NOARG
+
+ABSL_INTERNALS_ADD(const);
+ABSL_INTERNALS_ADD(&);
+ABSL_INTERNALS_ADD(const&);
+ABSL_INTERNALS_ADD(&&);       // NOLINT
+ABSL_INTERNALS_ADD(const&&);  // NOLINT
+
+#undef ABSL_INTERNALS_ADD
+
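+// A minimal illustration (not taken from the upstream test; `_` is the
+// placeholder qualifier type defined earlier in this file): with
+// Destructible::trivial the generated functor keeps a trivial destructor,
+// while Destructible::nothrow introduces a user-provided (nontrivial) one.
+static_assert(
+    std::is_trivially_destructible<
+        add<Movable::trivial, Destructible::trivial, const _&, NothrowCall::yes,
+            ObjSize::small, ObjAlign::normal>>::value,
+    "");
+static_assert(
+    !std::is_trivially_destructible<
+        add<Movable::trivial, Destructible::nothrow, const _&, NothrowCall::yes,
+            ObjSize::small, ObjAlign::normal>>::value,
+    "");
+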
+template <Destructible Destructibility, class Qual,
+          NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add<Movable::no, Destructibility, Qual, CallExceptionSpec, Size,
+           Alignment> : private add<Movable::trivial, Destructibility, Qual,
+                                    CallExceptionSpec, Size, Alignment> {
+  using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
+                   Size, Alignment>;
+
+  explicit add(int state_init) : Base(state_init) {}
+
+  explicit add(std::initializer_list<int> state_init, int tail)
+      : Base(state_init, tail) {}
+
+  add(add&&) = delete;
+
+  using Base::operator();
+  using Base::state;
+};
+
+template <Destructible Destructibility, class Qual,
+          NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add<Movable::yes, Destructibility, Qual, CallExceptionSpec, Size,
+           Alignment> : private add<Movable::trivial, Destructibility, Qual,
+                                    CallExceptionSpec, Size, Alignment> {
+  using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
+                   Size, Alignment>;
+
+  explicit add(int state_init) : Base(state_init) {}
+
+  explicit add(std::initializer_list<int> state_init, int tail)
+      : Base(state_init, tail) {}
+
+  add(add&& other) noexcept(false) : Base(other.state) {}  // NOLINT
+
+  using Base::operator();
+  using Base::state;
+};
+
+template <Destructible Destructibility, class Qual,
+          NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add<Movable::nothrow, Destructibility, Qual, CallExceptionSpec, Size,
+           Alignment> : private add<Movable::trivial, Destructibility, Qual,
+                                    CallExceptionSpec, Size, Alignment> {
+  using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
+                   Size, Alignment>;
+
+  explicit add(int state_init) : Base(state_init) {}
+
+  explicit add(std::initializer_list<int> state_init, int tail)
+      : Base(state_init, tail) {}
+
+  add(add&& other) noexcept : Base(other.state) {}
+
+  using Base::operator();
+  using Base::state;
+};
+
+// Actual non-member functions rather than function objects
+Int add_function(Int&& a, int b, int c) noexcept { return a.value + b + c; }
+
+Int mult_function(Int&& a, int b, int c) noexcept { return a.value * b * c; }
+
+Int square_function(Int const&& a) noexcept { return a.value * a.value; }
+
+template <class Sig>
+using AnyInvocable = absl::AnyInvocable<Sig>;
+
+// Instantiations of this template contain all of the compile-time parameters
+// for a given instantiation of the AnyInvocable test suite.
+template <Movable Movability, Destructible Destructibility, class Qual,
+          NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct TestParams {
+  static constexpr Movable kMovability = Movability;
+  static constexpr Destructible kDestructibility = Destructibility;
+  using Qualifiers = Qual;
+  static constexpr NothrowCall kCallExceptionSpec = CallExceptionSpec;
+  static constexpr bool kIsNoexcept = kCallExceptionSpec == NothrowCall::yes;
+  static constexpr bool kIsRvalueQualified =
+      std::is_rvalue_reference<Qual>::value;
+  static constexpr ObjSize kSize = Size;
+  static constexpr ObjAlign kAlignment = Alignment;
+
+  // These types are used when testing with member-object-pointer invocables.
+  using UnqualifiedUnaryFunType = int(Int const&&)
+      ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes);
+  using UnaryFunType = GiveQualifiersToFun<Qualifiers, UnqualifiedUnaryFunType>;
+  using MemObjPtrType = int(Int::*);
+  using UnaryAnyInvType = AnyInvocable<UnaryFunType>;
+  using UnaryThisParamType = QualifiersForThis<Qualifiers, UnaryAnyInvType>;
+
+  template <class T>
+  static UnaryThisParamType ToUnaryThisParam(T&& fun) {
+    return static_cast<UnaryThisParamType>(fun);
+  }
+
+  // This function type intentionally uses 3 "kinds" of parameter types.
+  //     - A user-defined type
+  //     - A reference type
+  //     - A scalar type
+  //
+  // These were chosen because internal forwarding handles parameters
+  // differently depending on their type properties (scalars are forwarded by
+  // value).
+  using ResultType = Int;
+  using AnyInvocableFunTypeNotNoexcept = Int(Int, const int&, int);
+  using UnqualifiedFunType =
+      typename std::conditional<kIsNoexcept, Int(Int, const int&, int) noexcept,
+                                Int(Int, const int&, int)>::type;
+  using FunType = GiveQualifiersToFun<Qualifiers, UnqualifiedFunType>;
+  using MemFunPtrType =
+      typename std::conditional<kIsNoexcept,
+                                Int (Int::*)(const int&, int) noexcept,
+                                Int (Int::*)(const int&, int)>::type;
+  using AnyInvType = AnyInvocable<FunType>;
+  using AddType = add<kMovability, kDestructibility, Qualifiers,
+                      kCallExceptionSpec, kSize, kAlignment>;
+  using ThisParamType = QualifiersForThis<Qualifiers, AnyInvType>;
+
+  template <class T>
+  static ThisParamType ToThisParam(T&& fun) {
+    return static_cast<ThisParamType>(fun);
+  }
+
+  // These typedefs are used when testing void return type covariance.
+  using UnqualifiedVoidFunType =
+      typename std::conditional<kIsNoexcept,
+                                void(Int, const int&, int) noexcept,
+                                void(Int, const int&, int)>::type;
+  using VoidFunType = GiveQualifiersToFun<Qualifiers, UnqualifiedVoidFunType>;
+  using VoidAnyInvType = AnyInvocable<VoidFunType>;
+  using VoidThisParamType = QualifiersForThis<Qualifiers, VoidAnyInvType>;
+
+  template <class T>
+  static VoidThisParamType ToVoidThisParam(T&& fun) {
+    return static_cast<VoidThisParamType>(fun);
+  }
+
+  using CompatibleAnyInvocableFunType =
+      absl::conditional_t<std::is_rvalue_reference<Qual>::value,
+                          GiveQualifiersToFun<const _&&, UnqualifiedFunType>,
+                          GiveQualifiersToFun<const _&, UnqualifiedFunType>>;
+
+  using CompatibleAnyInvType = AnyInvocable<CompatibleAnyInvocableFunType>;
+
+  using IncompatibleInvocable =
+      absl::conditional_t<std::is_rvalue_reference<Qual>::value,
+                          GiveQualifiersToFun<_&, UnqualifiedFunType>(_::*),
+                          GiveQualifiersToFun<_&&, UnqualifiedFunType>(_::*)>;
+};
+
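+// A minimal illustration (not taken from the upstream test; `SketchParams` is
+// a hypothetical alias and `_` is the placeholder qualifier type defined
+// earlier in this file): spot-check the function type derived from one
+// concrete set of knobs.
+using SketchParams =
+    TestParams<Movable::trivial, Destructible::trivial, const _&,
+               NothrowCall::no, ObjSize::small, ObjAlign::normal>;
+static_assert(std::is_same<SketchParams::FunType,
+                           Int(Int, const int&, int) const&>::value,
+              "");
+static_assert(!SketchParams::kIsNoexcept, "");
+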
+// Given a member-pointer type, this metafunction yields the target type of the
+// pointer, not including the class-type. It is used to verify that the function
+// call operator of AnyInvocable has the proper signature, corresponding to the
+// function type that the user provided.
+template <class MemberPtrType>
+struct MemberTypeOfImpl;
+
+template <class Class, class T>
+struct MemberTypeOfImpl<T(Class::*)> {
+  using type = T;
+};
+
+template <class MemberPtrType>
+using MemberTypeOf = typename MemberTypeOfImpl<MemberPtrType>::type;
+
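+// A minimal illustration (not taken from the upstream test): the class portion
+// of a pointer-to-member type is discarded, leaving only the member's type.
+static_assert(std::is_same<MemberTypeOf<int Int::*>, int>::value, "");
+static_assert(std::is_same<MemberTypeOf<int (Int::*)(const int&, int)>,
+                           int(const int&, int)>::value,
+              "");
+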
+template <class T, class = void>
+struct IsMemberSwappableImpl : std::false_type {
+  static constexpr bool kIsNothrow = false;
+};
+
+template <class T>
+struct IsMemberSwappableImpl<
+    T, absl::void_t<decltype(std::declval<T&>().swap(std::declval<T&>()))>>
+    : std::true_type {
+  static constexpr bool kIsNothrow =
+      noexcept(std::declval<T&>().swap(std::declval<T&>()));
+};
+
+template <class T>
+using IsMemberSwappable = IsMemberSwappableImpl<T>;
+
+template <class T>
+using IsNothrowMemberSwappable =
+    std::integral_constant<bool, IsMemberSwappableImpl<T>::kIsNothrow>;
+
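+// A minimal illustration (not taken from the upstream test; SketchSwappable is
+// a hypothetical helper defined only for this check): a type with a nothrow
+// member swap() satisfies both traits, while `int`, which has no member swap,
+// satisfies neither.
+struct SketchSwappable {
+  void swap(SketchSwappable&) noexcept {}
+};
+static_assert(IsMemberSwappable<SketchSwappable>::value, "");
+static_assert(IsNothrowMemberSwappable<SketchSwappable>::value, "");
+static_assert(!IsMemberSwappable<int>::value, "");
+static_assert(!IsNothrowMemberSwappable<int>::value, "");
+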
+template <class T>
+class AnyInvTestBasic : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestBasic);
+
+TYPED_TEST_P(AnyInvTestBasic, DefaultConstruction) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+
+  EXPECT_TRUE(std::is_nothrow_default_constructible<AnyInvType>::value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullptr) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun = nullptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+
+  EXPECT_TRUE(
+      (std::is_nothrow_constructible<AnyInvType, std::nullptr_t>::value));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullFunctionPtr) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+  UnqualifiedFunType* const null_fun_ptr = nullptr;
+  AnyInvType fun = null_fun_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberFunctionPtr) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+  const MemFunPtrType null_mem_fun_ptr = nullptr;
+  AnyInvType fun = null_mem_fun_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberObjectPtr) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+  using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+  const MemObjPtrType null_mem_obj_ptr = nullptr;
+  UnaryAnyInvType fun = null_mem_obj_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberFunctionPtr) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun = &Int::MemberFunctionAdd;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberObjectPtr) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+
+  UnaryAnyInvType fun = &Int::value;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionFunctionReferenceDecay) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun = add_function;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableEmpty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+  CompatibleAnyInvType other;
+  AnyInvType fun = std::move(other);
+
+  EXPECT_FALSE(static_cast<bool>(other));  // NOLINT
+  EXPECT_EQ(other, nullptr);               // NOLINT
+  EXPECT_EQ(nullptr, other);               // NOLINT
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableNonempty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+  CompatibleAnyInvType other = &add_function;
+  AnyInvType fun = std::move(other);
+
+  EXPECT_FALSE(static_cast<bool>(other));  // NOLINT
+  EXPECT_EQ(other, nullptr);               // NOLINT
+  EXPECT_EQ(nullptr, other);               // NOLINT
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConversionToBool) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  {
+    AnyInvType fun;
+
+    // This tests contextually-convertible-to-bool.
+    EXPECT_FALSE(fun ? true : false);  // NOLINT
+
+    // Make sure that the conversion is not implicit.
+    EXPECT_TRUE(
+        (std::is_nothrow_constructible<bool, const AnyInvType&>::value));
+    EXPECT_FALSE((std::is_convertible<const AnyInvType&, bool>::value));
+  }
+
+  {
+    AnyInvType fun = &add_function;
+
+    // This tests contextually-convertible-to-bool.
+    EXPECT_TRUE(fun ? true : false);  // NOLINT
+  }
+}
+
+TYPED_TEST_P(AnyInvTestBasic, Invocation) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  using FunType = typename TypeParam::FunType;
+  using AnyInvCallType = MemberTypeOf<decltype(&AnyInvType::operator())>;
+
+  // Make sure the function call operator of AnyInvocable always has the
+  // type that was specified via the template argument.
+  EXPECT_TRUE((std::is_same<AnyInvCallType, FunType>::value));
+
+  AnyInvType fun = &add_function;
+
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceConstruction) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceConstructionInitializerList) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun(absl::in_place_type<AddType>, {1, 2, 3, 4}, 5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(39, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstruction) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+  AnyInvType fun(absl::in_place_type<UnqualifiedFunType*>, nullptr);
+
+  // In-place construction does not lead to empty.
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstructionValueInit) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+  AnyInvType fun(absl::in_place_type<UnqualifiedFunType*>);
+
+  // In-place construction does not lead to empty.
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstruction) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+  AnyInvType fun(absl::in_place_type<MemFunPtrType>, nullptr);
+
+  // In-place construction does not lead to empty.
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstructionValueInit) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+  AnyInvType fun(absl::in_place_type<MemFunPtrType>);
+
+  // In-place construction does not lead to empty.
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstruction) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+  using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+  UnaryAnyInvType fun(absl::in_place_type<MemObjPtrType>, nullptr);
+
+  // In-place construction does not lead to empty.
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstructionValueInit) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+  using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+  UnaryAnyInvType fun(absl::in_place_type<MemObjPtrType>);
+
+  // In-place construction does not lead to empty.
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceVoidCovarianceConstruction) {
+  using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  VoidAnyInvType fun(absl::in_place_type<AddType>, 5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromEmpty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType source_fun;
+  AnyInvType fun(std::move(source_fun));
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+
+  EXPECT_TRUE(std::is_nothrow_move_constructible<AnyInvType>::value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromNonEmpty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+  AnyInvType fun(std::move(source_fun));
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+  EXPECT_TRUE(std::is_nothrow_move_constructible<AnyInvType>::value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrEmpty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun;
+
+  EXPECT_TRUE(fun == nullptr);
+  EXPECT_TRUE(nullptr == fun);
+
+  EXPECT_FALSE(fun != nullptr);
+  EXPECT_FALSE(nullptr != fun);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrNonempty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+  EXPECT_FALSE(fun == nullptr);
+  EXPECT_FALSE(nullptr == fun);
+
+  EXPECT_TRUE(fun != nullptr);
+  EXPECT_TRUE(nullptr != fun);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ResultType) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using ExpectedResultType = typename TypeParam::ResultType;
+
+  EXPECT_TRUE((std::is_same<typename AnyInvType::result_type,
+                            ExpectedResultType>::value));
+}
+
+template <class T>
+class AnyInvTestCombinatoric : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestCombinatoric);
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType source_fun;
+  AnyInvType fun;
+
+  fun = std::move(source_fun);
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyLhsNonemptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+  AnyInvType fun;
+
+  fun = std::move(source_fun);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyEmptyLhsRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType source_fun;
+  AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+  fun = std::move(source_fun);
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyLhsNonemptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+  AnyInvType fun(absl::in_place_type<AddType>, 20);
+
+  fun = std::move(source_fun);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignEmpty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType source_fun;
+  source_fun = std::move(source_fun);
+
+  // This space intentionally left blank.
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignNonempty) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+  source_fun = std::move(source_fun);
+
+  // This space intentionally left blank.
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun;
+  fun = nullptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+  UnqualifiedFunType* const null_fun_ptr = nullptr;
+  AnyInvType fun;
+  fun = null_fun_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+  const MemFunPtrType null_mem_fun_ptr = nullptr;
+  AnyInvType fun;
+  fun = null_mem_fun_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrEmptyLhs) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+  using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+  const MemObjPtrType null_mem_obj_ptr = nullptr;
+  UnaryAnyInvType fun;
+  fun = null_mem_obj_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun;
+  fun = &Int::MemberFunctionAdd;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrEmptyLhs) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+
+  UnaryAnyInvType fun;
+  fun = &Int::value;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun;
+  fun = add_function;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+             AssignCompatibleAnyInvocableEmptyLhsEmptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+  CompatibleAnyInvType other;
+  AnyInvType fun;
+  fun = std::move(other);
+
+  EXPECT_FALSE(static_cast<bool>(other));  // NOLINT
+  EXPECT_EQ(other, nullptr);               // NOLINT
+  EXPECT_EQ(nullptr, other);               // NOLINT
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+             AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+  CompatibleAnyInvType other = &add_function;
+  AnyInvType fun;
+  fun = std::move(other);
+
+  EXPECT_FALSE(static_cast<bool>(other));  // NOLINT
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun = &mult_function;
+  fun = nullptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+  UnqualifiedFunType* const null_fun_ptr = nullptr;
+  AnyInvType fun = &mult_function;
+  fun = null_fun_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+  const MemFunPtrType null_mem_fun_ptr = nullptr;
+  AnyInvType fun = &mult_function;
+  fun = null_mem_fun_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrNonemptyLhs) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+  using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+  const MemObjPtrType null_mem_obj_ptr = nullptr;
+  UnaryAnyInvType fun = &square_function;
+  fun = null_mem_obj_ptr;
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun = &mult_function;
+  fun = &Int::MemberFunctionAdd;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrNonemptyLhs) {
+  using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+
+  UnaryAnyInvType fun = &square_function;
+  fun = &Int::value;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  AnyInvType fun = &mult_function;
+  fun = add_function;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+             AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+  CompatibleAnyInvType other;
+  AnyInvType fun = &mult_function;
+  fun = std::move(other);
+
+  EXPECT_FALSE(static_cast<bool>(other));  // NOLINT
+  EXPECT_EQ(other, nullptr);               // NOLINT
+  EXPECT_EQ(nullptr, other);               // NOLINT
+
+  EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+             AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+  CompatibleAnyInvType other = &add_function;
+  AnyInvType fun = &mult_function;
+  fun = std::move(other);
+
+  EXPECT_FALSE(static_cast<bool>(other));  // NOLINT
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsEmptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  // Swap idiom
+  {
+    AnyInvType fun;
+    AnyInvType other;
+
+    using std::swap;
+    swap(fun, other);
+
+    EXPECT_FALSE(static_cast<bool>(fun));
+    EXPECT_FALSE(static_cast<bool>(other));
+
+    EXPECT_TRUE(
+        absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+  }
+
+  // Member swap
+  {
+    AnyInvType fun;
+    AnyInvType other;
+
+    fun.swap(other);
+
+    EXPECT_FALSE(static_cast<bool>(fun));
+    EXPECT_FALSE(static_cast<bool>(other));
+
+    EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+  }
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsNonemptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  // Swap idiom
+  {
+    AnyInvType fun;
+    AnyInvType other(absl::in_place_type<AddType>, 5);
+
+    using std::swap;
+    swap(fun, other);
+
+    EXPECT_TRUE(static_cast<bool>(fun));
+    EXPECT_FALSE(static_cast<bool>(other));
+
+    EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+    EXPECT_TRUE(
+        absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+  }
+
+  // Member swap
+  {
+    AnyInvType fun;
+    AnyInvType other(absl::in_place_type<AddType>, 5);
+
+    fun.swap(other);
+
+    EXPECT_TRUE(static_cast<bool>(fun));
+    EXPECT_FALSE(static_cast<bool>(other));
+
+    EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+    EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+  }
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsEmptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  // Swap idiom
+  {
+    AnyInvType fun(absl::in_place_type<AddType>, 5);
+    AnyInvType other;
+
+    using std::swap;
+    swap(fun, other);
+
+    EXPECT_FALSE(static_cast<bool>(fun));
+    EXPECT_TRUE(static_cast<bool>(other));
+
+    EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+    EXPECT_TRUE(
+        absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+  }
+
+  // Member swap
+  {
+    AnyInvType fun(absl::in_place_type<AddType>, 5);
+    AnyInvType other;
+
+    fun.swap(other);
+
+    EXPECT_FALSE(static_cast<bool>(fun));
+    EXPECT_TRUE(static_cast<bool>(other));
+
+    EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+    EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+  }
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsNonemptyRhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  // Swap idiom
+  {
+    AnyInvType fun(absl::in_place_type<AddType>, 5);
+    AnyInvType other(absl::in_place_type<AddType>, 6);
+
+    using std::swap;
+    swap(fun, other);
+
+    EXPECT_TRUE(static_cast<bool>(fun));
+    EXPECT_TRUE(static_cast<bool>(other));
+
+    EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+    EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+    EXPECT_TRUE(
+        absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+  }
+
+  // Member swap
+  {
+    AnyInvType fun(absl::in_place_type<AddType>, 5);
+    AnyInvType other(absl::in_place_type<AddType>, 6);
+
+    fun.swap(other);
+
+    EXPECT_TRUE(static_cast<bool>(fun));
+    EXPECT_TRUE(static_cast<bool>(other));
+
+    EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+    EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+    EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+  }
+}
+
+template <class T>
+class AnyInvTestMovable : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestMovable);
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionUserDefinedType) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun(AddType(5));
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionVoidCovariance) {
+  using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  VoidAnyInvType fun(AddType(5));
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun;
+  fun = AddType(5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun = &add_function;
+  fun = AddType(5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionAssignVoidCovariance) {
+  using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  VoidAnyInvType fun;
+  fun = AddType(5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+template <class T>
+class AnyInvTestNoexceptFalse : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse);
+
+TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionConstructionConstraints) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  EXPECT_TRUE((std::is_constructible<
+               AnyInvType,
+               typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+  EXPECT_FALSE((
+      std::is_constructible<AnyInvType,
+                            typename TypeParam::IncompatibleInvocable>::value));
+}
+
+TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionAssignConstraints) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  EXPECT_TRUE((std::is_assignable<
+               AnyInvType&,
+               typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+  EXPECT_FALSE(
+      (std::is_assignable<AnyInvType&,
+                          typename TypeParam::IncompatibleInvocable>::value));
+}
+
+template <class T>
+class AnyInvTestNoexceptTrue : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue);
+
+TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionConstructionConstraints) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+  GTEST_SKIP() << "Noexcept was not part of the type system before C++17.";
+#else
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  EXPECT_FALSE((std::is_constructible<
+                AnyInvType,
+                typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+  EXPECT_FALSE((
+      std::is_constructible<AnyInvType,
+                            typename TypeParam::IncompatibleInvocable>::value));
+#endif
+}
+
+TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionAssignConstraints) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+  GTEST_SKIP() << "Noexcept was not part of the type system before C++17.";
+#else
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  EXPECT_FALSE((std::is_assignable<
+                AnyInvType&,
+                typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+  EXPECT_FALSE(
+      (std::is_assignable<AnyInvType&,
+                          typename TypeParam::IncompatibleInvocable>::value));
+#endif
+}
+
+template <class T>
+class AnyInvTestNonRvalue : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestNonRvalue);
+
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionConstructionReferenceWrapper) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AddType add(4);
+  AnyInvType fun = std::ref(add);
+  add.state = 5;
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestNonRvalue, NonMoveableResultType) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+  GTEST_SKIP() << "Copy/move elision was not standard before C++17";
+#else
+  // Define a result type that cannot be copy- or move-constructed.
+  struct Result {
+    int x;
+
+    explicit Result(const int x_in) : x(x_in) {}
+    Result(Result&&) = delete;
+  };
+
+  static_assert(!std::is_move_constructible<Result>::value, "");
+  static_assert(!std::is_copy_constructible<Result>::value, "");
+
+  // Assumption check: it should nevertheless be possible to use functors that
+  // return a Result struct according to the language rules.
+  const auto return_17 = []() noexcept { return Result(17); };
+  EXPECT_EQ(17, return_17().x);
+
+  // Just like plain functors, it should work fine to use an AnyInvocable that
+  // returns the non-moveable type.
+  using UnqualifiedFun =
+      absl::conditional_t<TypeParam::kIsNoexcept, Result() noexcept, Result()>;
+
+  using Fun =
+      GiveQualifiersToFun<typename TypeParam::Qualifiers, UnqualifiedFun>;
+
+  AnyInvocable<Fun> any_inv(return_17);
+  EXPECT_EQ(17, any_inv().x);
+#endif
+}
+
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperEmptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AddType add(4);
+  AnyInvType fun;
+  fun = std::ref(add);
+  add.state = 5;
+  EXPECT_TRUE(
+      (std::is_nothrow_assignable<AnyInvType&,
+                                  std::reference_wrapper<AddType>>::value));
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperNonemptyLhs) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AddType add(4);
+  AnyInvType fun = &mult_function;
+  fun = std::ref(add);
+  add.state = 5;
+  EXPECT_TRUE(
+      (std::is_nothrow_assignable<AnyInvType&,
+                                  std::reference_wrapper<AddType>>::value));
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+template <class T>
+class AnyInvTestRvalue : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestRvalue);
+
+TYPED_TEST_P(AnyInvTestRvalue, ConversionConstructionReferenceWrapper) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  EXPECT_FALSE((
+      std::is_convertible<std::reference_wrapper<AddType>, AnyInvType>::value));
+}
+
+TYPED_TEST_P(AnyInvTestRvalue, NonMoveableResultType) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+  GTEST_SKIP() << "Copy/move elision was not standard before C++17";
+#else
+  // Define a result type that cannot be copy- or move-constructed.
+  struct Result {
+    int x;
+
+    explicit Result(const int x_in) : x(x_in) {}
+    Result(Result&&) = delete;
+  };
+
+  static_assert(!std::is_move_constructible<Result>::value, "");
+  static_assert(!std::is_copy_constructible<Result>::value, "");
+
+  // Assumption check: it should nevertheless be possible to use functors that
+  // return a Result struct according to the language rules.
+  const auto return_17 = []() noexcept { return Result(17); };
+  EXPECT_EQ(17, return_17().x);
+
+  // Just like plain functors, it should work fine to use an AnyInvocable that
+  // returns the non-moveable type.
+  using UnqualifiedFun =
+      absl::conditional_t<TypeParam::kIsNoexcept, Result() noexcept, Result()>;
+
+  using Fun =
+      GiveQualifiersToFun<typename TypeParam::Qualifiers, UnqualifiedFun>;
+
+  EXPECT_EQ(17, AnyInvocable<Fun>(return_17)().x);
+#endif
+}
+
+TYPED_TEST_P(AnyInvTestRvalue, ConversionAssignReferenceWrapper) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  EXPECT_FALSE((
+      std::is_assignable<AnyInvType&, std::reference_wrapper<AddType>>::value));
+}
+
+TYPED_TEST_P(AnyInvTestRvalue, NonConstCrashesOnSecondCall) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+  using AddType = typename TypeParam::AddType;
+
+  AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+  EXPECT_TRUE(static_cast<bool>(fun));
+  std::move(fun)(7, 8, 9);
+
+  // Ensure we're still valid
+  EXPECT_TRUE(static_cast<bool>(fun));  // NOLINT(bugprone-use-after-move)
+
+#if !defined(NDEBUG)
+  EXPECT_DEATH_IF_SUPPORTED(std::move(fun)(7, 8, 9), "");
+#endif
+}
+
+// Ensure that any qualifiers (in particular &&-qualifiers) do not affect
+// when the destructor is actually run.
+TYPED_TEST_P(AnyInvTestRvalue, QualifierIndependentObjectLifetime) {
+  using AnyInvType = typename TypeParam::AnyInvType;
+
+  auto refs = std::make_shared<std::nullptr_t>();
+  {
+    AnyInvType fun([refs](auto&&...) noexcept { return 0; });
+    EXPECT_GT(refs.use_count(), 1);
+
+    std::move(fun)(7, 8, 9);
+
+    // Ensure destructor hasn't run even if rref-qualified
+    EXPECT_GT(refs.use_count(), 1);
+  }
+  EXPECT_EQ(refs.use_count(), 1);
+}
+
+// NOTE: This test suite originally attempted to enumerate all possible
+// combinations of type properties, but the build time became too long.
+// Instead, certain parameters are assumed to be orthogonal, and some
+// combinations are elided.
+
+// A metafunction to form a TypeList of all cv and non-rvalue ref combinations,
+// coupled with all of the other explicitly specified parameters.
+template <Movable Mov, Destructible Dest, NothrowCall CallExceptionSpec,
+          ObjSize Size, ObjAlign Align>
+using NonRvalueQualifiedTestParams = ::testing::Types<               //
+    TestParams<Mov, Dest, _, CallExceptionSpec, Size, Align>,        //
+    TestParams<Mov, Dest, const _, CallExceptionSpec, Size, Align>,  //
+    TestParams<Mov, Dest, _&, CallExceptionSpec, Size, Align>,       //
+    TestParams<Mov, Dest, const _&, CallExceptionSpec, Size, Align>>;
+
+// A metafunction to form a TypeList of const and non-const rvalue ref
+// qualifiers, coupled with all of the other explicitly specified parameters.
+template <Movable Mov, Destructible Dest, NothrowCall CallExceptionSpec,
+          ObjSize Size, ObjAlign Align>
+using RvalueQualifiedTestParams = ::testing::Types<
+    TestParams<Mov, Dest, _&&, CallExceptionSpec, Size, Align>,       //
+    TestParams<Mov, Dest, const _&&, CallExceptionSpec, Size, Align>  //
+    >;
+
+// All qualifier combinations and a noexcept function type
+using TestParameterListNonRvalueQualifiersNothrowCall =
+    NonRvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+                                 NothrowCall::yes, ObjSize::small,
+                                 ObjAlign::normal>;
+using TestParameterListRvalueQualifiersNothrowCall =
+    RvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+                              NothrowCall::yes, ObjSize::small,
+                              ObjAlign::normal>;
+
+// All qualifier combinations and a non-noexcept function type
+using TestParameterListNonRvalueQualifiersCallMayThrow =
+    NonRvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+                                 NothrowCall::no, ObjSize::small,
+                                 ObjAlign::normal>;
+using TestParameterListRvalueQualifiersCallMayThrow =
+    RvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+                              NothrowCall::no, ObjSize::small,
+                              ObjAlign::normal>;
+
+// Lists of various cases that should lead to remote storage
+using TestParameterListRemoteMovable = ::testing::Types<
+    // "Normal" aligned types that are large and have trivial destructors
+    TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>,  //
+    TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>,  //
+    TestParams<Movable::yes, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::yes, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>,  //
+
+    // Same as above but with non-trivial destructors
+    TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>,  //
+    TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>,  //
+    TestParams<Movable::yes, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::yes, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>  //
+
+// Dynamic memory allocation for over-aligned data was introduced in C++17.
+// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0035r4.html
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+    // Types that must use remote storage because of a large alignment.
+    ,
+    TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::large>,  //
+    TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::large>,  //
+    TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::large>,  //
+    TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::large>  //
+#endif
+    >;
+using TestParameterListRemoteNonMovable = ::testing::Types<
+    // "Normal" aligned types that are large and have trivial destructors
+    TestParams<Movable::no, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::no, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>,  //
+    // Same as above but with non-trivial destructors
+    TestParams<Movable::no, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::no, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::large, ObjAlign::normal>  //
+    >;
+
+// Parameters that lead to local storage
+using TestParameterListLocal = ::testing::Types<
+    // Types that meet the requirements and have trivial destructors
+    TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+
+    // Same as above but with non-trivial destructors
+    TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>  //
+    >;
+
+// All of the tests that are run for every possible combination of types.
+REGISTER_TYPED_TEST_SUITE_P(
+    AnyInvTestBasic, DefaultConstruction, ConstructionNullptr,
+    ConstructionNullFunctionPtr, ConstructionNullMemberFunctionPtr,
+    ConstructionNullMemberObjectPtr, ConstructionMemberFunctionPtr,
+    ConstructionMemberObjectPtr, ConstructionFunctionReferenceDecay,
+    ConstructionCompatibleAnyInvocableEmpty,
+    ConstructionCompatibleAnyInvocableNonempty, InPlaceConstruction,
+    ConversionToBool, Invocation, InPlaceConstructionInitializerList,
+    InPlaceNullFunPtrConstruction, InPlaceNullFunPtrConstructionValueInit,
+    InPlaceNullMemFunPtrConstruction, InPlaceNullMemFunPtrConstructionValueInit,
+    InPlaceNullMemObjPtrConstruction, InPlaceNullMemObjPtrConstructionValueInit,
+    InPlaceVoidCovarianceConstruction, MoveConstructionFromEmpty,
+    MoveConstructionFromNonEmpty, ComparisonWithNullptrEmpty,
+    ComparisonWithNullptrNonempty, ResultType);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+    NonRvalueCallMayThrow, AnyInvTestBasic,
+    TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestBasic,
+                               TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestBasic,
+                               TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestBasic,
+                               TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestBasic, TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestBasic,
+                               TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestBasic,
+                               TestParameterListRvalueQualifiersNothrowCall);
+
+// Tests for operations that involve both a left-hand and a right-hand operand
+// (assignment and swap).
+REGISTER_TYPED_TEST_SUITE_P(
+    AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs,
+    MoveAssignEmptyLhsNonemptyRhs, MoveAssignNonemptyEmptyLhsRhs,
+    MoveAssignNonemptyLhsNonemptyRhs, SelfMoveAssignEmpty,
+    SelfMoveAssignNonempty, AssignNullptrEmptyLhs,
+    AssignNullFunctionPtrEmptyLhs, AssignNullMemberFunctionPtrEmptyLhs,
+    AssignNullMemberObjectPtrEmptyLhs, AssignMemberFunctionPtrEmptyLhs,
+    AssignMemberObjectPtrEmptyLhs, AssignFunctionReferenceDecayEmptyLhs,
+    AssignCompatibleAnyInvocableEmptyLhsEmptyRhs,
+    AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs, AssignNullptrNonemptyLhs,
+    AssignNullFunctionPtrNonemptyLhs, AssignNullMemberFunctionPtrNonemptyLhs,
+    AssignNullMemberObjectPtrNonemptyLhs, AssignMemberFunctionPtrNonemptyLhs,
+    AssignMemberObjectPtrNonemptyLhs, AssignFunctionReferenceDecayNonemptyLhs,
+    AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs,
+    AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs, SwapEmptyLhsEmptyRhs,
+    SwapEmptyLhsNonemptyRhs, SwapNonemptyLhsEmptyRhs,
+    SwapNonemptyLhsNonemptyRhs);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+    NonRvalueCallMayThrow, AnyInvTestCombinatoric,
+    TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestCombinatoric,
+                               TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestCombinatoric,
+                               TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestCombinatoric,
+                               TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestCombinatoric,
+                               TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestCombinatoric,
+                               TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestCombinatoric,
+                               TestParameterListRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestMovable,
+                            ConversionConstructionUserDefinedType,
+                            ConversionConstructionVoidCovariance,
+                            ConversionAssignUserDefinedTypeEmptyLhs,
+                            ConversionAssignUserDefinedTypeNonemptyLhs,
+                            ConversionAssignVoidCovariance);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+    NonRvalueCallMayThrow, AnyInvTestMovable,
+    TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestMovable,
+                               TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestMovable,
+                               TestParameterListRemoteMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestMovable,
+                               TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestMovable,
+                               TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestMovable,
+                               TestParameterListRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse,
+                            ConversionConstructionConstraints,
+                            ConversionAssignConstraints);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+    NonRvalueCallMayThrow, AnyInvTestNoexceptFalse,
+    TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestNoexceptFalse,
+                               TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNoexceptFalse,
+                               TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNoexceptFalse,
+                               TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNoexceptFalse,
+                               TestParameterListLocal);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue,
+                            ConversionConstructionConstraints,
+                            ConversionAssignConstraints);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNoexceptTrue,
+                               TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestNoexceptTrue,
+                               TestParameterListRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNonRvalue,
+                            ConversionConstructionReferenceWrapper,
+                            NonMoveableResultType,
+                            ConversionAssignReferenceWrapperEmptyLhs,
+                            ConversionAssignReferenceWrapperNonemptyLhs);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+    NonRvalueCallMayThrow, AnyInvTestNonRvalue,
+    TestParameterListNonRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNonRvalue,
+                               TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNonRvalue,
+                               TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNonRvalue,
+                               TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNonRvalue,
+                               TestParameterListNonRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestRvalue,
+                            ConversionConstructionReferenceWrapper,
+                            NonMoveableResultType,
+                            ConversionAssignReferenceWrapper,
+                            NonConstCrashesOnSecondCall,
+                            QualifierIndependentObjectLifetime);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestRvalue,
+                               TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestRvalue,
+                               TestParameterListRvalueQualifiersNothrowCall);
+
+// Minimal SFINAE testing for platforms where we can't run the tests but can
+// still build binaries.
+static_assert(
+    std::is_convertible<void (*)(), absl::AnyInvocable<void() &&>>::value, "");
+static_assert(!std::is_convertible<void*, absl::AnyInvocable<void() &&>>::value,
+              "");
+
+#undef ABSL_INTERNAL_NOEXCEPT_SPEC
+
+}  // namespace
diff --git a/abseil-cpp/absl/functional/bind_front.h b/abseil-cpp/absl/functional/bind_front.h
index 5b47970..a956eb0 100644
--- a/abseil-cpp/absl/functional/bind_front.h
+++ b/abseil-cpp/absl/functional/bind_front.h
@@ -30,6 +30,10 @@
 #ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_
 #define ABSL_FUNCTIONAL_BIND_FRONT_H_
 
+#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+#include <functional>  // For std::bind_front.
+#endif  // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+
 #include "absl/functional/internal/front_binder.h"
 #include "absl/utility/utility.h"
 
@@ -42,11 +46,12 @@
 //
 // Like `std::bind()`, `absl::bind_front()` is implicitly convertible to
 // `std::function`.  In particular, it may be used as a simpler replacement for
-// `std::bind()` in most cases, as it does not require  placeholders to be
+// `std::bind()` in most cases, as it does not require placeholders to be
 // specified. More importantly, it provides more reliable correctness guarantees
 // than `std::bind()`; while `std::bind()` will silently ignore passing more
 // parameters than expected, for example, `absl::bind_front()` will report such
-// mis-uses as errors.
+// mis-uses as errors. In C++20, `absl::bind_front` is replaced by
+// `std::bind_front`.
 //
 // absl::bind_front(a...) can be seen as storing the results of
 // std::make_tuple(a...).
@@ -170,6 +175,9 @@
 //   // Doesn't copy "hi".
 //   absl::bind_front(Print, absl::string_view(hi))("Chuk");
 //
+#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+using std::bind_front;
+#else   // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
 template <class F, class... BoundArgs>
 constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
     F&& func, BoundArgs&&... args) {
@@ -177,6 +185,7 @@
       absl::in_place, absl::forward<F>(func),
       absl::forward<BoundArgs>(args)...);
 }
+#endif  // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
 
 ABSL_NAMESPACE_END
 }  // namespace absl
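
A minimal usage sketch of the alias arrangement above; `Greet` and the bound values are illustrative and behave the same whether `absl::bind_front` resolves to the Abseil implementation or to `std::bind_front`:

#include <iostream>
#include <string>

#include "absl/functional/bind_front.h"

void Greet(const std::string& greeting, const std::string& name) {
  std::cout << greeting << ", " << name << "\n";
}

int main() {
  // Binds the leading argument; the result is callable with just `name`.
  auto say_hello = absl::bind_front(Greet, "Hello");
  say_hello("World");  // Prints "Hello, World".
  return 0;
}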
diff --git a/abseil-cpp/absl/functional/function_ref.h b/abseil-cpp/absl/functional/function_ref.h
index 6e03ac2..2b9139d 100644
--- a/abseil-cpp/absl/functional/function_ref.h
+++ b/abseil-cpp/absl/functional/function_ref.h
@@ -50,6 +50,7 @@
 #include <functional>
 #include <type_traits>
 
+#include "absl/base/attributes.h"
 #include "absl/functional/internal/function_ref.h"
 #include "absl/meta/type_traits.h"
 
@@ -65,10 +66,11 @@
 
 // FunctionRef
 //
-// An `absl::FunctionRef` is a lightweight wrapper to any invokable object with
+// An `absl::FunctionRef` is a lightweight wrapper to any invocable object with
 // a compatible signature. Generally, an `absl::FunctionRef` should only be used
 // as an argument type and should be preferred as an argument over a const
-// reference to a `std::function`.
+// reference to a `std::function`. `absl::FunctionRef` itself does not allocate,
+// although the wrapped invocable may.
 //
 // Example:
 //
@@ -96,9 +98,10 @@
                               std::is_convertible<FR, R>::value>::type;
 
  public:
-  // Constructs a FunctionRef from any invokable type.
+  // Constructs a FunctionRef from any invocable type.
   template <typename F, typename = EnableIfCompatible<const F&>>
-  FunctionRef(const F& f)  // NOLINT(runtime/explicit)
+  // NOLINTNEXTLINE(runtime/explicit)
+  FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND)
       : invoker_(&absl::functional_internal::InvokeObject<F, R, Args...>) {
     absl::functional_internal::AssertNonNull(f);
     ptr_.obj = &f;
@@ -122,6 +125,7 @@
   // To help prevent subtle lifetime bugs, FunctionRef is not assignable.
   // Typically, it should only be used as an argument type.
   FunctionRef& operator=(const FunctionRef& rhs) = delete;
+  FunctionRef(const FunctionRef& rhs) = default;
 
   // Call the underlying object.
   R operator()(Args... args) const {
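
A short sketch of the intended argument-only usage that the lifetime-bound annotation above helps protect; `ForEach` and `print_value` are illustrative names, not part of the patch:

#include <iostream>
#include <vector>

#include "absl/functional/function_ref.h"

// Taking absl::FunctionRef avoids the allocation that a `const std::function&`
// parameter could force on the caller, and documents that the callable is not
// retained beyond the call.
void ForEach(const std::vector<int>& values,
             absl::FunctionRef<void(int)> callback) {
  for (int v : values) callback(v);
}

int main() {
  std::vector<int> values = {1, 2, 3};
  auto print_value = [](int v) { std::cout << v << "\n"; };
  ForEach(values, print_value);  // The lambda outlives the call, so no dangling.
  return 0;
}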
diff --git a/abseil-cpp/absl/functional/function_ref_test.cc b/abseil-cpp/absl/functional/function_ref_test.cc
index 3aa5974..c61117e 100644
--- a/abseil-cpp/absl/functional/function_ref_test.cc
+++ b/abseil-cpp/absl/functional/function_ref_test.cc
@@ -14,11 +14,13 @@
 
 #include "absl/functional/function_ref.h"
 
+#include <functional>
 #include <memory>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/container/internal/test_instance_tracker.h"
+#include "absl/functional/any_invocable.h"
 #include "absl/memory/memory.h"
 
 namespace absl {
@@ -156,6 +158,25 @@
   EXPECT_DEBUG_DEATH({ FunctionRef<int(const S& s)> ref(mem_ptr); }, "");
 }
 
+TEST(FunctionRef, NullStdFunctionAssertPasses) {
+  std::function<void()> function = []() {};
+  FunctionRef<void()> ref(function);
+}
+
+TEST(FunctionRef, NullStdFunctionAssertFails) {
+  std::function<void()> function = nullptr;
+  EXPECT_DEBUG_DEATH({ FunctionRef<void()> ref(function); }, "");
+}
+
+TEST(FunctionRef, NullAnyInvocableAssertPasses) {
+  AnyInvocable<void() const> invocable = []() {};
+  FunctionRef<void()> ref(invocable);
+}
+TEST(FunctionRef, NullAnyInvocableAssertFails) {
+  AnyInvocable<void() const> invocable = nullptr;
+  EXPECT_DEBUG_DEATH({ FunctionRef<void()> ref(invocable); }, "");
+}
+
 #endif  // GTEST_HAS_DEATH_TEST
 
 TEST(FunctionRef, CopiesAndMovesPerPassByValue) {
@@ -236,7 +257,7 @@
       "Reference types should be preserved");
 
   // Make sure the address of an object received by reference is the same as the
-  // addess of the object passed by the caller.
+  // address of the object passed by the caller.
   {
     LargeTrivial obj;
     auto test = [&obj](LargeTrivial& input) { ASSERT_EQ(&input, &obj); };
@@ -252,6 +273,16 @@
   }
 }
 
+TEST(FunctionRef, ReferenceToIncompleteType) {
+  struct IncompleteType;
+  auto test = [](IncompleteType&) {};
+  absl::FunctionRef<void(IncompleteType&)> ref(test);
+
+  struct IncompleteType {};
+  IncompleteType obj;
+  ref(obj);
+}
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/functional/function_ref_benchmark.cc b/abseil-cpp/absl/functional/function_type_benchmark.cc
similarity index 78%
rename from abseil-cpp/absl/functional/function_ref_benchmark.cc
rename to abseil-cpp/absl/functional/function_type_benchmark.cc
index 045305b..03dc31d 100644
--- a/abseil-cpp/absl/functional/function_ref_benchmark.cc
+++ b/abseil-cpp/absl/functional/function_type_benchmark.cc
@@ -1,4 +1,4 @@
-// Copyright 2019 The Abseil Authors.
+// Copyright 2022 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,12 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/functional/function_ref.h"
-
+#include <functional>
 #include <memory>
+#include <string>
 
 #include "benchmark/benchmark.h"
 #include "absl/base/attributes.h"
+#include "absl/functional/any_invocable.h"
+#include "absl/functional/function_ref.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -61,6 +63,12 @@
 }
 BENCHMARK(BM_TrivialFunctionRef);
 
+void BM_TrivialAnyInvocable(benchmark::State& state) {
+  ConstructAndCallFunctionBenchmark<AnyInvocable<void()>>(state,
+                                                          TrivialFunctor{});
+}
+BENCHMARK(BM_TrivialAnyInvocable);
+
 void BM_LargeStdFunction(benchmark::State& state) {
   ConstructAndCallFunctionBenchmark<std::function<void()>>(state,
                                                            LargeFunctor{});
@@ -72,6 +80,13 @@
 }
 BENCHMARK(BM_LargeFunctionRef);
 
+
+void BM_LargeAnyInvocable(benchmark::State& state) {
+  ConstructAndCallFunctionBenchmark<AnyInvocable<void()>>(state,
+                                                          LargeFunctor{});
+}
+BENCHMARK(BM_LargeAnyInvocable);
+
 void BM_FunPtrStdFunction(benchmark::State& state) {
   ConstructAndCallFunctionBenchmark<std::function<void()>>(state, FreeFunction);
 }
@@ -82,6 +97,11 @@
 }
 BENCHMARK(BM_FunPtrFunctionRef);
 
+void BM_FunPtrAnyInvocable(benchmark::State& state) {
+  ConstructAndCallFunctionBenchmark<AnyInvocable<void()>>(state, FreeFunction);
+}
+BENCHMARK(BM_FunPtrAnyInvocable);
+
 // Doesn't include construction or copy overhead in the loop.
 template <typename Function, typename Callable, typename... Args>
 void CallFunctionBenchmark(benchmark::State& state, const Callable& c,
@@ -113,6 +133,12 @@
 }
 BENCHMARK(BM_TrivialArgsFunctionRef);
 
+void BM_TrivialArgsAnyInvocable(benchmark::State& state) {
+  CallFunctionBenchmark<AnyInvocable<void(int, int, int)>>(
+      state, FunctorWithTrivialArgs{}, 1, 2, 3);
+}
+BENCHMARK(BM_TrivialArgsAnyInvocable);
+
 struct FunctorWithNonTrivialArgs {
   void operator()(std::string a, std::string b, std::string c) const {
     benchmark::DoNotOptimize(&a);
@@ -137,6 +163,14 @@
 }
 BENCHMARK(BM_NonTrivialArgsFunctionRef);
 
+void BM_NonTrivialArgsAnyInvocable(benchmark::State& state) {
+  std::string a, b, c;
+  CallFunctionBenchmark<
+      AnyInvocable<void(std::string, std::string, std::string)>>(
+      state, FunctorWithNonTrivialArgs{}, a, b, c);
+}
+BENCHMARK(BM_NonTrivialArgsAnyInvocable);
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/functional/internal/any_invocable.h b/abseil-cpp/absl/functional/internal/any_invocable.h
new file mode 100644
index 0000000..f096bb0
--- /dev/null
+++ b/abseil-cpp/absl/functional/internal/any_invocable.h
@@ -0,0 +1,891 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Implementation details for `absl::AnyInvocable`
+
+#ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
+#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
+
+////////////////////////////////////////////////////////////////////////////////
+//                                                                            //
+// This implementation of the proposed `any_invocable` uses an approach that  //
+// chooses between local storage and remote storage for the contained target  //
+// object based on the target object's size, alignment requirements, and      //
+// whether or not it has a nothrow move constructor. Additional optimizations //
+// are performed when the object is a trivially copyable type [basic.types].  //
+//                                                                            //
+// There are three datamembers per `AnyInvocable` instance                    //
+//                                                                            //
+// 1) A union containing either                                               //
+//        - A pointer to the target object referred to via a void*, or        //
+//        - the target object, emplaced into a raw char buffer                //
+//                                                                            //
+// 2) A function pointer to a "manager" function operation that takes a       //
+//    discriminator and logically branches to either perform a move operation //
+//    or destroy operation based on that discriminator.                       //
+//                                                                            //
+// 3) A function pointer to an "invoker" function operation that invokes the  //
+//    target object, directly returning the result.                           //
+//                                                                            //
+// When in the logically empty state, the manager function is an empty        //
+// function and the invoker function is one that would be undefined-behavior  //
+// to call.                                                                   //
+//                                                                            //
+// An additional optimization is performed when converting from one           //
+// AnyInvocable to another where only the noexcept specification and/or the   //
+// cv/ref qualifiers of the function type differ. In these cases, the         //
+// conversion works by "moving the guts", similar to if they were the same    //
+// exact type, as opposed to having to perform an additional layer of         //
+// wrapping through remote storage.                                           //
+//                                                                            //
+////////////////////////////////////////////////////////////////////////////////
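
As a reading aid, a compilable sketch of the three-member layout described in the block comment above, specialized for an illustrative signature `int(int)`; none of the names below are the identifiers used later in this header:

#include <cstddef>  // std::max_align_t

struct AnyInvocableLayoutSketch {
  union State {
    void* remote_target;                                      // (1a) heap-allocated target
    alignas(std::max_align_t) char local[2 * sizeof(void*)];  // (1b) target stored inline
  };
  enum class Op { kRelocateFromTo, kDispose };

  State state;                                  // (1) the wrapped target, inline or remote
  void (*manager)(Op, State* from, State* to);  // (2) moves or destroys the target
  int (*invoker)(State* state, int arg);        // (3) calls the target; null when empty
};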
+
+// IWYU pragma: private, include "absl/functional/any_invocable.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <exception>
+#include <functional>
+#include <initializer_list>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Helper macro used to prevent spelling `noexcept` in language versions older
+// than C++17, where it is not part of the type system, in order to avoid
+// compilation failures and internal compiler errors.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
+#else
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
+#endif
+
+// Defined in functional/any_invocable.h
+template <class Sig>
+class AnyInvocable;
+
+namespace internal_any_invocable {
+
+// Constants relating to the small-object-storage for AnyInvocable
+enum StorageProperty : std::size_t {
+  kAlignment = alignof(std::max_align_t),  // The alignment of the storage
+  kStorageSize = sizeof(void*) * 2         // The size of the storage
+};
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction for checking if a type is an AnyInvocable instantiation.
+// This is used during conversion operations.
+template <class T>
+struct IsAnyInvocable : std::false_type {};
+
+template <class Sig>
+struct IsAnyInvocable<AnyInvocable<Sig>> : std::true_type {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A type trait that tells us whether or not a target function type should be
+// stored locally in the small object optimization storage
+template <class T>
+using IsStoredLocally = std::integral_constant<
+    bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment &&
+              kAlignment % alignof(T) == 0 &&
+              std::is_nothrow_move_constructible<T>::value>;
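
A hedged illustration of this criterion; the callable types below are made up, the internal trait is referenced only for exposition, and the size claims assume a common target where `kStorageSize` is two pointers wide:

#include "absl/functional/any_invocable.h"  // also provides the internal trait

namespace storage_sketch {
namespace internal = absl::internal_any_invocable;

struct SmallCallable {  // One pointer wide and nothrow-movable: stored locally.
  void* context;
  void operator()() const {}
};

struct LargeCallable {  // Larger than the two-pointer buffer: stored remotely.
  char buffer[4 * sizeof(void*)];
  void operator()() const {}
};

struct ThrowingMoveCallable {  // Small, but its move may throw: stored remotely.
  ThrowingMoveCallable() = default;
  ThrowingMoveCallable(ThrowingMoveCallable&&) noexcept(false) {}
  void operator()() const {}
};

static_assert(internal::IsStoredLocally<SmallCallable>::value, "");
static_assert(!internal::IsStoredLocally<LargeCallable>::value, "");
static_assert(!internal::IsStoredLocally<ThrowingMoveCallable>::value, "");

}  // namespace storage_sketch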
+
+// An implementation of C++20's std::remove_cvref_t.
+template <class T>
+using RemoveCVRef =
+    typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// An implementation of the C++ standard INVOKE<R> pseudo-macro; the operation
+// is equivalent to std::invoke except that it forces an implicit conversion
+// to the specified return type. If "R" is void, the function is executed and
+// the return value is simply ignored.
+template <class ReturnType, class F, class... P,
+          typename = absl::enable_if_t<std::is_void<ReturnType>::value>>
+void InvokeR(F&& f, P&&... args) {
+  absl::base_internal::invoke(std::forward<F>(f), std::forward<P>(args)...);
+}
+
+template <class ReturnType, class F, class... P,
+          absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0>
+ReturnType InvokeR(F&& f, P&&... args) {
+  // GCC 12 has a false-positive -Wmaybe-uninitialized warning here.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+  return absl::base_internal::invoke(std::forward<F>(f),
+                                     std::forward<P>(args)...);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
+}
+
+//
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that takes a "T" corresponding to a parameter type of the
+// user's specified function type, and yields the parameter type to use for the
+// type-erased invoker. In order to prevent observable moves, this must be
+// either a reference or, if the type is trivial, the original parameter type
+// itself. Since the parameter type may be incomplete at the point that this
+// metafunction is used, we can only do this optimization for scalar types
+// rather than for any trivial type.
+template <typename T>
+T ForwardImpl(std::true_type);
+
+template <typename T>
+T&& ForwardImpl(std::false_type);
+
+// NOTE: We deliberately use an intermediate struct instead of a direct alias,
+// as a workaround for b/206991861 on MSVC versions < 1924.
+template <class T>
+struct ForwardedParameter {
+  using type = decltype((
+      ForwardImpl<T>)(std::integral_constant<bool,
+                                             std::is_scalar<T>::value>()));
+};
+
+template <class T>
+using ForwardedParameterType = typename ForwardedParameter<T>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A discriminator when calling the "manager" function that describes which
+// type-erased operation should be invoked.
+//
+// "relocate_from_to" specifies that the manager should perform a move.
+//
+// "dispose" specifies that the manager should perform a destroy.
+enum class FunctionToCall : bool { relocate_from_to, dispose };
+
+// The portion of `AnyInvocable` state that contains either a pointer to the
+// target object or the object itself in local storage
+union TypeErasedState {
+  struct {
+    // A pointer to the type-erased object when remotely stored
+    void* target;
+    // The size of the object for `RemoteManagerTrivial`
+    std::size_t size;
+  } remote;
+
+  // Local-storage for the type-erased object when small and trivial enough
+  alignas(kAlignment) char storage[kStorageSize];
+};
+
+// A typed accessor for the object in `TypeErasedState` storage
+template <class T>
+T& ObjectInLocalStorage(TypeErasedState* const state) {
+  // We launder here because the storage may be reused with the same type.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
+  return *std::launder(reinterpret_cast<T*>(&state->storage));
+#elif ABSL_HAVE_BUILTIN(__builtin_launder)
+  return *__builtin_launder(reinterpret_cast<T*>(&state->storage));
+#else
+
+  // When `std::launder` or equivalent are not available, we rely on undefined
+  // behavior, which works as intended on Abseil's officially supported
+  // platforms as of Q2 2022.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#pragma GCC diagnostic push
+#endif
+  return *reinterpret_cast<T*>(&state->storage);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+#endif
+}
+
+// The type for functions issuing lifetime-related operations: move and dispose
+// A pointer to such a function is contained in each `AnyInvocable` instance.
+// NOTE: When specifying `FunctionToCall::dispose`, the same state must be
+// passed as both "from" and "to".
+using ManagerType = void(FunctionToCall /*operation*/,
+                         TypeErasedState* /*from*/, TypeErasedState* /*to*/)
+    ABSL_INTERNAL_NOEXCEPT_SPEC(true);
+
+// The type for functions issuing the actual invocation of the object
+// A pointer to such a function is contained in each AnyInvocable instance.
+template <bool SigIsNoexcept, class ReturnType, class... P>
+using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType<P>...)
+    ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept);
+
+// The manager that is used when AnyInvocable is empty
+inline void EmptyManager(FunctionToCall /*operation*/,
+                         TypeErasedState* /*from*/,
+                         TypeErasedState* /*to*/) noexcept {}
+
+// The manager that is used when a target function is in local storage and is
+// a trivially copyable type.
+inline void LocalManagerTrivial(FunctionToCall /*operation*/,
+                                TypeErasedState* const from,
+                                TypeErasedState* const to) noexcept {
+  // This single statement without branching handles both possible operations.
+  //
+  // For FunctionToCall::dispose, "from" and "to" point to the same state, and
+  // so this assignment logically would do nothing.
+  //
+  // Note: Correctness here relies on http://wg21.link/p0593, which has only
+  // become standard in C++20, though implementations do not break it in
+  // practice for earlier versions of C++.
+  //
+  // The correct way to do this without that paper is to first placement-new a
+  // default-constructed T in "to->storage" prior to the memmove, but doing so
+  // requires a different function to be created for each T that is stored
+  // locally, which can cause unnecessary bloat and be less cache friendly.
+  *to = *from;
+
+  // Note: Because the type is trivially copyable, the destructor does not need
+  // to be called ("trivially copyable" requires a trivial destructor).
+}
+
+// The manager that is used when a target function is in local storage and is
+// not a trivially copyable type.
+template <class T>
+void LocalManagerNontrivial(FunctionToCall operation,
+                            TypeErasedState* const from,
+                            TypeErasedState* const to) noexcept {
+  static_assert(IsStoredLocally<T>::value,
+                "Local storage must only be used for supported types.");
+  static_assert(!std::is_trivially_copyable<T>::value,
+                "Trivially copyable types must use LocalManagerTrivial.");
+
+  T& from_object = (ObjectInLocalStorage<T>)(from);
+
+  switch (operation) {
+    case FunctionToCall::relocate_from_to:
+      // NOTE: Requires that the left-hand operand is already empty.
+      ::new (static_cast<void*>(&to->storage)) T(std::move(from_object));
+      ABSL_FALLTHROUGH_INTENDED;
+    case FunctionToCall::dispose:
+      from_object.~T();  // Must not throw. // NOLINT
+      return;
+  }
+  ABSL_UNREACHABLE();
+}
+
+// The invoker that is used when a target function is in local storage
+// Note: QualTRef here is the target function type along with cv and reference
+// qualifiers that must be used when calling the function.
+template <bool SigIsNoexcept, class ReturnType, class QualTRef, class... P>
+ReturnType LocalInvoker(
+    TypeErasedState* const state,
+    ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
+  using RawT = RemoveCVRef<QualTRef>;
+  static_assert(
+      IsStoredLocally<RawT>::value,
+      "Target object must be in local storage in order to be invoked from it.");
+
+  auto& f = (ObjectInLocalStorage<RawT>)(state);
+  return (InvokeR<ReturnType>)(static_cast<QualTRef>(f),
+                               static_cast<ForwardedParameterType<P>>(args)...);
+}
+
+// The manager that is used when a target function is in remote storage and it
+// has a trivial destructor
+inline void RemoteManagerTrivial(FunctionToCall operation,
+                                 TypeErasedState* const from,
+                                 TypeErasedState* const to) noexcept {
+  switch (operation) {
+    case FunctionToCall::relocate_from_to:
+      // NOTE: Requires that the left-hand operand is already empty.
+      to->remote = from->remote;
+      return;
+    case FunctionToCall::dispose:
+#if defined(__cpp_sized_deallocation)
+      ::operator delete(from->remote.target, from->remote.size);
+#else   // __cpp_sized_deallocation
+      ::operator delete(from->remote.target);
+#endif  // __cpp_sized_deallocation
+      return;
+  }
+  ABSL_UNREACHABLE();
+}
+
+// The manager that is used when a target function is in remote storage and the
+// destructor of the type is not trivial
+template <class T>
+void RemoteManagerNontrivial(FunctionToCall operation,
+                             TypeErasedState* const from,
+                             TypeErasedState* const to) noexcept {
+  static_assert(!IsStoredLocally<T>::value,
+                "Remote storage must only be used for types that do not "
+                "qualify for local storage.");
+
+  switch (operation) {
+    case FunctionToCall::relocate_from_to:
+      // NOTE: Requires that the left-hand operand is already empty.
+      to->remote.target = from->remote.target;
+      return;
+    case FunctionToCall::dispose:
+      ::delete static_cast<T*>(from->remote.target);  // Must not throw.
+      return;
+  }
+  ABSL_UNREACHABLE();
+}
+
+// The invoker that is used when a target function is in remote storage
+template <bool SigIsNoexcept, class ReturnType, class QualTRef, class... P>
+ReturnType RemoteInvoker(
+    TypeErasedState* const state,
+    ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
+  using RawT = RemoveCVRef<QualTRef>;
+  static_assert(!IsStoredLocally<RawT>::value,
+                "Target object must be in remote storage in order to be "
+                "invoked from it.");
+
+  auto& f = *static_cast<RawT*>(state->remote.target);
+  return (InvokeR<ReturnType>)(static_cast<QualTRef>(f),
+                               static_cast<ForwardedParameterType<P>>(args)...);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that checks if a type T is an instantiation of
+// absl::in_place_type_t (needed for constructor constraints of AnyInvocable).
+template <class T>
+struct IsInPlaceType : std::false_type {};
+
+template <class T>
+struct IsInPlaceType<absl::in_place_type_t<T>> : std::true_type {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A constructor name-tag used with CoreImpl (below) to request the
+// conversion-constructor. QualDecayedTRef is the decayed-type of the object to
+// wrap, along with the cv and reference qualifiers that must be applied when
+// performing an invocation of the wrapped object.
+template <class QualDecayedTRef>
+struct TypedConversionConstruct {};
+
+// A helper base class for all core operations of AnyInvocable. Most notably,
+// this class creates the function call operator and constraint-checkers so that
+// the top-level class does not have to be a series of partial specializations.
+//
+// Note: This definition exists (as opposed to being a declaration) so that if
+// the user of the top-level template accidentally passes a template argument
+// that is not a function type, they will get a static_assert in AnyInvocable's
+// class body rather than an error stating that Impl is not defined.
+template <class Sig>
+class Impl {};  // Note: This is partially-specialized later.
+
+// A std::unique_ptr deleter that deletes memory allocated via ::operator new.
+#if defined(__cpp_sized_deallocation)
+class TrivialDeleter {
+ public:
+  explicit TrivialDeleter(std::size_t size) : size_(size) {}
+
+  void operator()(void* target) const {
+    ::operator delete(target, size_);
+  }
+
+ private:
+  std::size_t size_;
+};
+#else   // __cpp_sized_deallocation
+class TrivialDeleter {
+ public:
+  explicit TrivialDeleter(std::size_t) {}
+
+  void operator()(void* target) const { ::operator delete(target); }
+};
+#endif  // __cpp_sized_deallocation
+
+template <bool SigIsNoexcept, class ReturnType, class... P>
+class CoreImpl;
+
+constexpr bool IsCompatibleConversion(void*, void*) { return false; }
+template <bool NoExceptSrc, bool NoExceptDest, class... T>
+constexpr bool IsCompatibleConversion(CoreImpl<NoExceptSrc, T...>*,
+                                      CoreImpl<NoExceptDest, T...>*) {
+  return !NoExceptDest || NoExceptSrc;
+}
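
In user-facing terms, the rule above lets a conversion drop a `noexcept` guarantee via the "moving the guts" path but never add one; a hedged sketch for C++17 and later, mirroring the style of the static_asserts in the accompanying test:

#include <type_traits>

#include "absl/functional/any_invocable.h"

#if defined(__cpp_noexcept_function_type)
// A noexcept-qualified AnyInvocable may seed a may-throw one with the same
// signature; the stored target is taken over directly rather than re-wrapped.
static_assert(std::is_convertible<absl::AnyInvocable<void() noexcept>,
                                  absl::AnyInvocable<void()>>::value,
              "");
// The reverse conversion cannot prove the noexcept guarantee, so it is rejected.
static_assert(!std::is_convertible<absl::AnyInvocable<void()>,
                                   absl::AnyInvocable<void() noexcept>>::value,
              "");
#endif  // __cpp_noexcept_function_type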
+
+// A helper base class for all core operations of AnyInvocable that do not
+// depend on the cv/ref qualifiers of the function type.
+template <bool SigIsNoexcept, class ReturnType, class... P>
+class CoreImpl {
+ public:
+  using result_type = ReturnType;
+
+  CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {}
+
+  enum class TargetType {
+    kPointer,
+    kCompatibleAnyInvocable,
+    kIncompatibleAnyInvocable,
+    kOther,
+  };
+
+  // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with
+  // the invocation of the Invocable. The unqualified type is the target object
+  // type to be stored.
+  template <class QualDecayedTRef, class F>
+  explicit CoreImpl(TypedConversionConstruct<QualDecayedTRef>, F&& f) {
+    using DecayedT = RemoveCVRef<QualDecayedTRef>;
+
+    constexpr TargetType kTargetType =
+        (std::is_pointer<DecayedT>::value ||
+         std::is_member_pointer<DecayedT>::value)
+            ? TargetType::kPointer
+        : IsCompatibleAnyInvocable<DecayedT>::value
+            ? TargetType::kCompatibleAnyInvocable
+        : IsAnyInvocable<DecayedT>::value
+            ? TargetType::kIncompatibleAnyInvocable
+            : TargetType::kOther;
+    // NOTE: We only use integers instead of enums as template parameters in
+    // order to work around a bug on C++14 under MSVC 2017.
+    // See b/236131881.
+    Initialize<kTargetType, QualDecayedTRef>(std::forward<F>(f));
+  }
+
+  // Note: QualTRef here includes the cv-ref qualifiers associated with the
+  // invocation of the Invocable. The unqualified type is the target object
+  // type to be stored.
+  template <class QualTRef, class... Args>
+  explicit CoreImpl(absl::in_place_type_t<QualTRef>, Args&&... args) {
+    InitializeStorage<QualTRef>(std::forward<Args>(args)...);
+  }
+
+  CoreImpl(CoreImpl&& other) noexcept {
+    other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
+    manager_ = other.manager_;
+    invoker_ = other.invoker_;
+    other.manager_ = EmptyManager;
+    other.invoker_ = nullptr;
+  }
+
+  CoreImpl& operator=(CoreImpl&& other) noexcept {
+    // Put the left-hand operand in an empty state.
+    //
+    // Note: A full reset that leaves us with an object that has its invariants
+    // intact is necessary in order to handle self-move. This is required by
+    // types that are used with certain operations of the standard library, such
+    // as the default definition of std::swap when both operands target the same
+    // object.
+    Clear();
+
+    // Perform the actual move/destroy operation on the target function.
+    other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
+    manager_ = other.manager_;
+    invoker_ = other.invoker_;
+    other.manager_ = EmptyManager;
+    other.invoker_ = nullptr;
+
+    return *this;
+  }
+
+  ~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); }
+
+  // Check whether or not the AnyInvocable is in the empty state.
+  bool HasValue() const { return invoker_ != nullptr; }
+
+  // Effects: Puts the object into its empty state.
+  void Clear() {
+    manager_(FunctionToCall::dispose, &state_, &state_);
+    manager_ = EmptyManager;
+    invoker_ = nullptr;
+  }
+
+  template <TargetType target_type, class QualDecayedTRef, class F,
+            absl::enable_if_t<target_type == TargetType::kPointer, int> = 0>
+  void Initialize(F&& f) {
+// This condition handles types that decay into pointers, which includes
+// function references. Since function references cannot be null, GCC warns
+// against comparing their decayed form with nullptr.
+// Since this is template-heavy code, we prefer to disable these warnings
+// locally instead of adding yet another overload of this function.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Waddress"
+#pragma GCC diagnostic ignored "-Wnonnull-compare"
+#pragma GCC diagnostic push
+#endif
+    if (static_cast<RemoveCVRef<QualDecayedTRef>>(f) == nullptr) {
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+      manager_ = EmptyManager;
+      invoker_ = nullptr;
+      return;
+    }
+    InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+  }
+
+  template <TargetType target_type, class QualDecayedTRef, class F,
+            absl::enable_if_t<
+                target_type == TargetType::kCompatibleAnyInvocable, int> = 0>
+  void Initialize(F&& f) {
+    // In this case we can "steal the guts" of the other AnyInvocable.
+    f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
+    manager_ = f.manager_;
+    invoker_ = f.invoker_;
+
+    f.manager_ = EmptyManager;
+    f.invoker_ = nullptr;
+  }
+
+  template <TargetType target_type, class QualDecayedTRef, class F,
+            absl::enable_if_t<
+                target_type == TargetType::kIncompatibleAnyInvocable, int> = 0>
+  void Initialize(F&& f) {
+    if (f.HasValue()) {
+      InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+    } else {
+      manager_ = EmptyManager;
+      invoker_ = nullptr;
+    }
+  }
+
+  template <TargetType target_type, class QualDecayedTRef, class F,
+            typename = absl::enable_if_t<target_type == TargetType::kOther>>
+  void Initialize(F&& f) {
+    InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+  }
+
+  // Use local (inline) storage for applicable target object types.
+  template <class QualTRef, class... Args,
+            typename = absl::enable_if_t<
+                IsStoredLocally<RemoveCVRef<QualTRef>>::value>>
+  void InitializeStorage(Args&&... args) {
+    using RawT = RemoveCVRef<QualTRef>;
+    ::new (static_cast<void*>(&state_.storage))
+        RawT(std::forward<Args>(args)...);
+
+    invoker_ = LocalInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
+    // We can simplify our manager if we know the type is trivially copyable.
+    InitializeLocalManager<RawT>();
+  }
+
+  // Use remote storage for target objects that cannot be stored locally.
+  template <class QualTRef, class... Args,
+            absl::enable_if_t<!IsStoredLocally<RemoveCVRef<QualTRef>>::value,
+                              int> = 0>
+  void InitializeStorage(Args&&... args) {
+    InitializeRemoteManager<RemoveCVRef<QualTRef>>(std::forward<Args>(args)...);
+    // This is set after everything else in case an exception is thrown in an
+    // earlier step of the initialization.
+    invoker_ = RemoteInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
+  }
+
+  template <class T,
+            typename = absl::enable_if_t<std::is_trivially_copyable<T>::value>>
+  void InitializeLocalManager() {
+    manager_ = LocalManagerTrivial;
+  }
+
+  template <class T,
+            absl::enable_if_t<!std::is_trivially_copyable<T>::value, int> = 0>
+  void InitializeLocalManager() {
+    manager_ = LocalManagerNontrivial<T>;
+  }
+
+  template <class T>
+  using HasTrivialRemoteStorage =
+      std::integral_constant<bool, std::is_trivially_destructible<T>::value &&
+                                       alignof(T) <=
+                                           ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>;
+
+  template <class T, class... Args,
+            typename = absl::enable_if_t<HasTrivialRemoteStorage<T>::value>>
+  void InitializeRemoteManager(Args&&... args) {
+    // unique_ptr is used for exception-safety in case construction throws.
+    std::unique_ptr<void, TrivialDeleter> uninitialized_target(
+        ::operator new(sizeof(T)), TrivialDeleter(sizeof(T)));
+    ::new (uninitialized_target.get()) T(std::forward<Args>(args)...);
+    state_.remote.target = uninitialized_target.release();
+    state_.remote.size = sizeof(T);
+    manager_ = RemoteManagerTrivial;
+  }
+
+  template <class T, class... Args,
+            absl::enable_if_t<!HasTrivialRemoteStorage<T>::value, int> = 0>
+  void InitializeRemoteManager(Args&&... args) {
+    state_.remote.target = ::new T(std::forward<Args>(args)...);
+    manager_ = RemoteManagerNontrivial<T>;
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  //
+  // Type trait to determine if the template argument is an AnyInvocable whose
+  // function type is compatible enough with ours such that we can
+  // "move the guts" out of it when moving, rather than having to place a new
+  // object into remote storage.
+
+  template <typename Other>
+  struct IsCompatibleAnyInvocable {
+    static constexpr bool value = false;
+  };
+
+  template <typename Sig>
+  struct IsCompatibleAnyInvocable<AnyInvocable<Sig>> {
+    static constexpr bool value =
+        (IsCompatibleConversion)(static_cast<
+                                     typename AnyInvocable<Sig>::CoreImpl*>(
+                                     nullptr),
+                                 static_cast<CoreImpl*>(nullptr));
+  };
+
+  //
+  //////////////////////////////////////////////////////////////////////////////
+
+  TypeErasedState state_;
+  ManagerType* manager_;
+  InvokerType<SigIsNoexcept, ReturnType, P...>* invoker_;
+};
+
+// A constructor name-tag used with Impl to request the
+// conversion-constructor
+struct ConversionConstruct {};
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that is normally an identity metafunction except that when
+// given a std::reference_wrapper<T>, it yields T&. This is necessary because
+// currently std::reference_wrapper's operator() is not conditionally noexcept,
+// so when checking if such an Invocable is nothrow-invocable, we must pull out
+// the underlying type.
+template <class T>
+struct UnwrapStdReferenceWrapperImpl {
+  using type = T;
+};
+
+template <class T>
+struct UnwrapStdReferenceWrapperImpl<std::reference_wrapper<T>> {
+  using type = T&;
+};
+
+template <class T>
+using UnwrapStdReferenceWrapper =
+    typename UnwrapStdReferenceWrapperImpl<T>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// An alias that always yields std::true_type (used with constraints) where
+// substitution failures happen when forming the template arguments.
+template <class... T>
+using TrueAlias =
+    std::integral_constant<bool, sizeof(absl::void_t<T...>*) != 0>;
+
+/*SFINAE constraints for the conversion-constructor.*/
+template <class Sig, class F,
+          class = absl::enable_if_t<
+              !std::is_same<RemoveCVRef<F>, AnyInvocable<Sig>>::value>>
+using CanConvert = TrueAlias<
+    absl::enable_if_t<!IsInPlaceType<RemoveCVRef<F>>::value>,
+    absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
+    absl::enable_if_t<
+        Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
+    absl::enable_if_t<std::is_constructible<absl::decay_t<F>, F>::value>>;
+
+/*SFINAE constraints for the std::in_place constructors.*/
+template <class Sig, class F, class... Args>
+using CanEmplace = TrueAlias<
+    absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
+    absl::enable_if_t<
+        Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
+    absl::enable_if_t<std::is_constructible<absl::decay_t<F>, Args...>::value>>;
+
+/*SFINAE constraints for the conversion-assign operator.*/
+template <class Sig, class F,
+          class = absl::enable_if_t<
+              !std::is_same<RemoveCVRef<F>, AnyInvocable<Sig>>::value>>
+using CanAssign = TrueAlias<
+    absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
+    absl::enable_if_t<
+        Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
+    absl::enable_if_t<std::is_constructible<absl::decay_t<F>, F>::value>>;
+
+/*SFINAE constraints for the reference-wrapper conversion-assign operator.*/
+template <class Sig, class F>
+using CanAssignReferenceWrapper = TrueAlias<
+    absl::enable_if_t<
+        Impl<Sig>::template CallIsValid<std::reference_wrapper<F>>::value>,
+    absl::enable_if_t<Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<
+        std::reference_wrapper<F>>::value>>;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// The constraint for checking whether or not a call meets the noexcept
+// callability requirements. This is a preprocessor macro because specifying it
+// this way as opposed to a disjunction/branch can improve the user-side error
+// messages and avoids an instantiation of std::is_nothrow_invocable_r in the
+// cases where the user did not specify a noexcept function type.
+//
+#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \
+  ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals)
+
+// The disjunction below is because we can't rely on std::is_nothrow_invocable_r
+// to give the right result when ReturnType is non-moveable in toolchains that
+// don't treat non-moveable result types correctly. For example this was the
+// case in libc++ before commit c3a24882 (2022-05).
+#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals)      \
+  absl::enable_if_t<absl::disjunction<                                       \
+      std::is_nothrow_invocable_r<                                           \
+          ReturnType, UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, \
+          P...>,                                                             \
+      std::conjunction<                                                      \
+          std::is_nothrow_invocable<                                         \
+              UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, P...>,  \
+          std::is_same<                                                      \
+              ReturnType,                                                    \
+              absl::base_internal::invoke_result_t<                          \
+                  UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals,     \
+                  P...>>>>::value>
+
+#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals)
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A macro to generate partial specializations of Impl with the different
+// combinations of supported cv/reference qualifiers and noexcept specifier.
+//
+// Here, `cv` are the cv-qualifiers if any, `ref` is the ref-qualifier if any,
+// inv_quals is the reference type to be used when invoking the target, and
+// noex is "true" if the function type is noexcept, or false if it is not.
+//
+// The CallIsValid condition is more complicated than simply using
+// absl::base_internal::is_invocable_r because we can't rely on it to give the
+// right result when ReturnType is non-moveable in toolchains that don't treat
+// non-moveable result types correctly. For example this was the case in libc++
+// before commit c3a24882 (2022-05).
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex)            \
+  template <class ReturnType, class... P>                                      \
+  class Impl<ReturnType(P...) cv ref ABSL_INTERNAL_NOEXCEPT_SPEC(noex)>        \
+      : public CoreImpl<noex, ReturnType, P...> {                              \
+   public:                                                                     \
+    /*The base class, which contains the datamembers and core operations*/     \
+    using Core = CoreImpl<noex, ReturnType, P...>;                             \
+                                                                               \
+    /*SFINAE constraint to check if F is invocable with the proper signature*/ \
+    template <class F>                                                         \
+    using CallIsValid = TrueAlias<absl::enable_if_t<absl::disjunction<         \
+        absl::base_internal::is_invocable_r<ReturnType,                        \
+                                            absl::decay_t<F> inv_quals, P...>, \
+        std::is_same<ReturnType,                                               \
+                     absl::base_internal::invoke_result_t<                     \
+                         absl::decay_t<F> inv_quals, P...>>>::value>>;         \
+                                                                               \
+    /*SFINAE constraint to check if F is nothrow-invocable when necessary*/    \
+    template <class F>                                                         \
+    using CallIsNoexceptIfSigIsNoexcept =                                      \
+        TrueAlias<ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals,   \
+                                                                  noex)>;      \
+                                                                               \
+    /*Put the AnyInvocable into an empty state.*/                              \
+    Impl() = default;                                                          \
+                                                                               \
+    /*The implementation of a conversion-constructor from "f"*/                \
+    /*This forwards to Core, attaching inv_quals so that the base class*/      \
+    /*knows how to properly type-erase the invocation.*/                       \
+    template <class F>                                                         \
+    explicit Impl(ConversionConstruct, F&& f)                                  \
+        : Core(TypedConversionConstruct<                                       \
+                   typename std::decay<F>::type inv_quals>(),                  \
+               std::forward<F>(f)) {}                                          \
+                                                                               \
+    /*Forward along the in-place construction parameters.*/                    \
+    template <class T, class... Args>                                          \
+    explicit Impl(absl::in_place_type_t<T>, Args&&... args)                    \
+        : Core(absl::in_place_type<absl::decay_t<T> inv_quals>,                \
+               std::forward<Args>(args)...) {}                                 \
+                                                                               \
+    /*Raises a fatal error when the AnyInvocable is invoked after a move*/     \
+    static ReturnType InvokedAfterMove(                                        \
+      TypeErasedState*,                                                        \
+      ForwardedParameterType<P>...) noexcept(noex) {                           \
+      ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move");           \
+      std::terminate();                                                        \
+    }                                                                          \
+                                                                               \
+    InvokerType<noex, ReturnType, P...>* ExtractInvoker() cv {                 \
+      using QualifiedTestType = int cv ref;                                    \
+      auto* invoker = this->invoker_;                                          \
+      if (!std::is_const<QualifiedTestType>::value &&                          \
+          std::is_rvalue_reference<QualifiedTestType>::value) {                \
+        ABSL_ASSERT([this]() {                                                 \
+          /* We checked that this isn't const above, so const_cast is safe */  \
+          const_cast<Impl*>(this)->invoker_ = InvokedAfterMove;                \
+          return this->HasValue();                                             \
+        }());                                                                  \
+      }                                                                        \
+      return invoker;                                                          \
+    }                                                                          \
+                                                                               \
+    /*The actual invocation operation with the proper signature*/              \
+    ReturnType operator()(P... args) cv ref noexcept(noex) {                   \
+      assert(this->invoker_ != nullptr);                                       \
+      return this->ExtractInvoker()(                                           \
+          const_cast<TypeErasedState*>(&this->state_),                         \
+          static_cast<ForwardedParameterType<P>>(args)...);                    \
+    }                                                                          \
+  }
+
+// Define the `noexcept(true)` specialization only for C++17 and beyond, when
+// `noexcept` is part of the type system.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+// A convenience macro that defines specializations for the noexcept(true) and
+// noexcept(false) forms, given the other properties.
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals)    \
+  ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \
+  ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true)
+#else
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
+  ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false)
+#endif
+
+// Non-ref-qualified partial specializations
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &);
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&);
+
+// Lvalue-ref-qualified partial specializations
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &);
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&);
+
+// Rvalue-ref-qualified partial specializations
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&);
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&);
+
+// Undef the detail-only macros.
+#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL
+#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_
+#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false
+#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true
+#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT
+#undef ABSL_INTERNAL_NOEXCEPT_SPEC
+
+}  // namespace internal_any_invocable
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
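
Since everything in this header exists to back `absl::AnyInvocable` (included via "absl/functional/any_invocable.h", per the IWYU pragma near the top of the file), a brief usage sketch with an illustrative move-only callable:

#include <memory>
#include <utility>

#include "absl/functional/any_invocable.h"

int main() {
  auto resource = std::make_unique<int>(42);
  // A move-only lambda: std::function cannot store this, AnyInvocable can.
  absl::AnyInvocable<int() const> read =
      [r = std::move(resource)]() { return *r; };

  absl::AnyInvocable<int() const> other = std::move(read);  // Move-only handle.
  return other() == 42 ? 0 : 1;  // Invokes the stored lambda.
}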
diff --git a/abseil-cpp/absl/functional/internal/function_ref.h b/abseil-cpp/absl/functional/internal/function_ref.h
index b5bb8b4..1cd34a3 100644
--- a/abseil-cpp/absl/functional/internal/function_ref.h
+++ b/abseil-cpp/absl/functional/internal/function_ref.h
@@ -20,6 +20,7 @@
 #include <type_traits>
 
 #include "absl/base/internal/invoke.h"
+#include "absl/functional/any_invocable.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -40,18 +41,21 @@
 // Chooses the best type for passing T as an argument.
 // Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are
 // passed by value.
-template <typename T>
-constexpr bool PassByValue() {
-  return !std::is_lvalue_reference<T>::value &&
-         absl::is_trivially_copy_constructible<T>::value &&
-         absl::is_trivially_copy_assignable<
-             typename std::remove_cv<T>::type>::value &&
-         std::is_trivially_destructible<T>::value &&
-         sizeof(T) <= 2 * sizeof(void*);
-}
+template <typename T,
+          bool IsLValueReference = std::is_lvalue_reference<T>::value>
+struct PassByValue : std::false_type {};
 
 template <typename T>
-struct ForwardT : std::conditional<PassByValue<T>(), T, T&&> {};
+struct PassByValue<T, /*IsLValueReference=*/false>
+    : std::integral_constant<bool,
+                             absl::is_trivially_copy_constructible<T>::value &&
+                                 absl::is_trivially_copy_assignable<
+                                     typename std::remove_cv<T>::type>::value &&
+                                 std::is_trivially_destructible<T>::value &&
+                                 sizeof(T) <= 2 * sizeof(void*)> {};
+
+template <typename T>
+struct ForwardT : std::conditional<PassByValue<T>::value, T, T&&> {};
 
 // An Invoker takes a pointer to the type-erased invokable object, followed by
 // the arguments that the invokable object expects.
@@ -87,6 +91,12 @@
   (void)f;
 }
 
+template <typename Sig>
+void AssertNonNull(const AnyInvocable<Sig>& f) {
+  assert(f != nullptr);
+  (void)f;
+}
+
 template <typename F>
 void AssertNonNull(const F&) {}
 
diff --git a/abseil-cpp/absl/hash/BUILD.bazel b/abseil-cpp/absl/hash/BUILD.bazel
index 5b1e2d0..4346fc4 100644
--- a/abseil-cpp/absl/hash/BUILD.bazel
+++ b/abseil-cpp/absl/hash/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -37,10 +36,14 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":city",
+        ":low_level_hash",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:endian",
         "//absl/container:fixed_array",
+        "//absl/functional:function_ref",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/numeric:int128",
         "//absl/strings",
         "//absl/types:optional",
@@ -65,15 +68,23 @@
 
 cc_test(
     name = "hash_test",
-    srcs = ["hash_test.cc"],
+    srcs = [
+        "hash_test.cc",
+        "internal/hash_test.h",
+    ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":hash",
         ":hash_testing",
         ":spy_hash_state",
+        "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/container:btree",
+        "//absl/container:flat_hash_map",
         "//absl/container:flat_hash_set",
+        "//absl/container:node_hash_map",
+        "//absl/container:node_hash_set",
         "//absl/meta:type_traits",
         "//absl/numeric:int128",
         "//absl/strings:cord_test_helpers",
@@ -81,6 +92,47 @@
     ],
 )
 
+cc_test(
+    name = "hash_instantiated_test",
+    srcs = [
+        "hash_instantiated_test.cc",
+        "internal/hash_test.h",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":hash",
+        ":hash_testing",
+        "//absl/base:config",
+        "//absl/container:btree",
+        "//absl/container:flat_hash_map",
+        "//absl/container:flat_hash_set",
+        "//absl/container:node_hash_map",
+        "//absl/container:node_hash_set",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "hash_benchmark",
+    testonly = 1,
+    srcs = ["hash_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":hash",
+        "//absl/base:core_headers",
+        "//absl/container:flat_hash_set",
+        "//absl/random",
+        "//absl/strings",
+        "//absl/strings:cord",
+        "//absl/strings:cord_test_helpers",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
 cc_library(
     name = "spy_hash_state",
     testonly = 1,
@@ -120,3 +172,31 @@
         "@com_google_googletest//:gtest_main",
     ],
 )
+
+cc_library(
+    name = "low_level_hash",
+    srcs = ["internal/low_level_hash.cc"],
+    hdrs = ["internal/low_level_hash.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:endian",
+        "//absl/base:prefetch",
+        "//absl/numeric:int128",
+    ],
+)
+
+cc_test(
+    name = "low_level_hash_test",
+    srcs = ["internal/low_level_hash_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":low_level_hash",
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
diff --git a/abseil-cpp/absl/hash/CMakeLists.txt b/abseil-cpp/absl/hash/CMakeLists.txt
index 61365e9..65fd2a5 100644
--- a/abseil-cpp/absl/hash/CMakeLists.txt
+++ b/abseil-cpp/absl/hash/CMakeLists.txt
@@ -25,16 +25,20 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::bits
+    absl::city
+    absl::config
     absl::core_headers
     absl::endian
     absl::fixed_array
+    absl::function_ref
     absl::meta
     absl::int128
     absl::strings
     absl::optional
     absl::variant
     absl::utility
-    absl::city
+    absl::low_level_hash
   PUBLIC
 )
 
@@ -50,8 +54,9 @@
     absl::meta
     absl::strings
     absl::variant
-    gmock
+    GTest::gmock
   TESTONLY
+  PUBLIC
 )
 
 absl_cc_test(
@@ -59,6 +64,7 @@
     hash_test
   SRCS
     "hash_test.cc"
+    "internal/hash_test.h"
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
@@ -66,13 +72,42 @@
     absl::hash
     absl::hash_testing
     absl::core_headers
+    absl::btree
+    absl::flat_hash_map
     absl::flat_hash_set
+    absl::node_hash_map
+    absl::node_hash_set
     absl::spy_hash_state
     absl::meta
     absl::int128
-    gmock_main
+    GTest::gmock_main
 )
 
+absl_cc_test(
+  NAME
+    hash_instantiated_test
+  SRCS
+    "hash_test.cc"
+    "internal/hash_test.h"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::hash
+    absl::hash_testing
+    absl::config
+    absl::btree
+    absl::flat_hash_map
+    absl::flat_hash_set
+    absl::node_hash_map
+    absl::node_hash_set
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+#
+# Note: Even though external code should not depend on this target
+# directly, it must be marked PUBLIC since it is a dependency of
+# hash_testing.
 absl_cc_library(
   NAME
     spy_hash_state
@@ -85,8 +120,10 @@
     absl::strings
     absl::str_format
   TESTONLY
+  PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     city
@@ -111,6 +148,35 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::city
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    low_level_hash
+  HDRS
+    "internal/low_level_hash.h"
+  SRCS
+    "internal/low_level_hash.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::endian
+    absl::int128
+    absl::prefetch
+)
+
+absl_cc_test(
+  NAME
+    low_level_hash_test
+  SRCS
+    "internal/low_level_hash_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::low_level_hash
+    absl::strings
+    GTest::gmock_main
+)
diff --git a/abseil-cpp/absl/hash/hash.h b/abseil-cpp/absl/hash/hash.h
index 5de132c..470cca4 100644
--- a/abseil-cpp/absl/hash/hash.h
+++ b/abseil-cpp/absl/hash/hash.h
@@ -26,9 +26,9 @@
 //     support Abseil hashing without requiring you to define a hashing
 //     algorithm.
 //   * `HashState`, a type-erased class which implements the manipulation of the
-//     hash state (H) itself, contains member functions `combine()` and
-//     `combine_contiguous()`, which you can use to contribute to an existing
-//     hash state when hashing your types.
+//     hash state (H) itself. It contains the member functions `combine()`,
+//     `combine_contiguous()`, and `combine_unordered()`, which you can use
+//     to contribute to an existing hash state when hashing your types.
 //
 // Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
 // provides most of its utility by abstracting away the hash algorithm (and its
@@ -40,6 +40,11 @@
 // each process.  E.g., `absl::Hash<int>{}(9)` in one process and
 // `absl::Hash<int>{}(9)` in another process are likely to differ.
 //
+// `absl::Hash` may also produce different values from different dynamically
+// loaded libraries. For this reason, `absl::Hash` values must never cross
+// boundaries in dynamically loaded libraries (including when used in types like
+// hash containers).
+//
 // `absl::Hash` is intended to strongly mix input bits with a target of passing
 // an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect).
 //
@@ -73,6 +78,10 @@
 #ifndef ABSL_HASH_HASH_H_
 #define ABSL_HASH_HASH_H_
 
+#include <tuple>
+#include <utility>
+
+#include "absl/functional/function_ref.h"
 #include "absl/hash/internal/hash.h"
 
 namespace absl {
@@ -101,18 +110,34 @@
 //   * std::unique_ptr and std::shared_ptr
 //   * All string-like types including:
 //     * absl::Cord
-//     * std::string
-//     * std::string_view (as well as any instance of std::basic_string that
-//       uses char and std::char_traits)
+//     * std::string (as well as any instance of std::basic_string that
+//       uses one of {char, wchar_t, char16_t, char32_t} and its associated
+//       std::char_traits)
+//     * std::string_view (as well as any instance of std::basic_string_view
+//       that uses one of {char, wchar_t, char16_t, char32_t} and its associated
+//       std::char_traits)
 //  * All the standard sequence containers (provided the elements are hashable)
-//  * All the standard ordered associative containers (provided the elements are
+//  * All the standard associative containers (provided the elements are
 //    hashable)
 //  * absl types such as the following:
 //    * absl::string_view
-//    * absl::InlinedVector
-//    * absl::FixedArray
 //    * absl::uint128
 //    * absl::Time, absl::Duration, and absl::TimeZone
+//  * absl containers (provided the elements are hashable) such as the
+//    following:
+//    * absl::flat_hash_set, absl::node_hash_set, absl::btree_set
+//    * absl::flat_hash_map, absl::node_hash_map, absl::btree_map
+//    * absl::btree_multiset, absl::btree_multimap
+//    * absl::InlinedVector
+//    * absl::FixedArray
+//
+// When absl::Hash is used to hash an unordered container with a custom hash
+// functor, the elements are hashed using default absl::Hash semantics, not
+// the custom hash functor.  This is consistent with the behavior of
+// operator==() on unordered containers, which compares elements pairwise with
+// operator==() rather than the custom equality functor.  It is usually a
+// mistake to use either operator==() or absl::Hash on unordered collections
+// that use functors incompatible with operator==() equality.
 //
 // Note: the list above is not meant to be exhaustive. Additional type support
 // may be added, in which case the above list will be updated.
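
Illustrative sketch only (not part of the applied patch): hashing absl containers with absl::Hash, per the list above. The functor name `IdentityHash` is hypothetical and exists only to show the caveat that the container's own hash functor does not participate in absl::Hash; elements are hashed with default absl::Hash semantics.

    #include <cstddef>
    #include "absl/container/flat_hash_set.h"
    #include "absl/hash/hash.h"

    struct IdentityHash {
      size_t operator()(int v) const { return static_cast<size_t>(v); }
    };

    inline void HashContainers() {
      absl::flat_hash_set<int> plain = {1, 2, 3};
      absl::flat_hash_set<int, IdentityHash> custom = {1, 2, 3};
      // Both hashes mix the elements (via absl::Hash<int>) in an
      // order-independent way plus the size; IdentityHash is never consulted.
      size_t h1 = absl::Hash<absl::flat_hash_set<int>>{}(plain);
      size_t h2 = absl::Hash<absl::flat_hash_set<int, IdentityHash>>{}(custom);
      (void)h1;
      (void)h2;
    }
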
@@ -151,7 +176,8 @@
 //   that are otherwise difficult to extend using `AbslHashValue()`. (See the
 //   `HashState` class below.)
 //
-// The "hash state" concept contains two member functions for mixing hash state:
+// The "hash state" concept contains three member functions for mixing hash
+// state:
 //
 // * `H::combine(state, values...)`
 //
@@ -185,6 +211,15 @@
 //    (it may perform internal optimizations). If you need this guarantee, use a
 //    loop instead.
 //
+// * `H::combine_unordered(state, begin, end)`
+//
+//    Combines a set of elements denoted by an iterator pair into a hash
+//    state, returning the updated state.  Note that the existing hash
+//    state is move-only and must be passed by value.
+//
+//    Unlike the other two methods, the hashing is order-independent.
+//    This can be used to hash unordered collections.
+//
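
Illustrative sketch only (not part of the applied patch): the new `H::combine_unordered` hook used by a hypothetical multiset-like type `Bag` whose internal element order is unspecified. It mirrors the FastUnorderedSet and UnorderedSequence helpers added further down in this change.

    #include <utility>
    #include <vector>

    template <typename T>
    class Bag {
     public:
      void Add(const T& v) { values_.push_back(v); }

      template <typename H>
      friend H AbslHashValue(H h, const Bag& bag) {
        // Mix the elements order-independently, then the size, so two Bags
        // holding the same elements in different orders hash identically.
        return H::combine(
            H::combine_unordered(std::move(h), bag.values_.begin(),
                                 bag.values_.end()),
            bag.values_.size());
      }

     private:
      std::vector<T> values_;
    };
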
 // -----------------------------------------------------------------------------
 // Adding Type Support to `absl::Hash`
 // -----------------------------------------------------------------------------
@@ -214,6 +249,26 @@
 template <typename T>
 using Hash = absl::hash_internal::Hash<T>;
 
+// HashOf
+//
+// absl::HashOf() is a helper that generates a hash from the values of its
+// arguments.  It dispatches to absl::Hash directly, as follows:
+//  * HashOf(t) == absl::Hash<T>{}(t)
+//  * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c))
+//
+// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when
+//  * The argument lists have pairwise identical C++ types
+//  * a1 == b1 && a2 == b2 && ...
+//
+// The requirement that the arguments match in both type and value is critical.
+// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if
+// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`.
+template <int&... ExplicitArgumentBarrier, typename... Types>
+size_t HashOf(const Types&... values) {
+  auto tuple = std::tie(values...);
+  return absl::Hash<decltype(tuple)>{}(tuple);
+}
+
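
Illustrative sketch only (not part of the applied patch): basic usage of the new absl::HashOf() helper; the function name `HashNameAndId` is hypothetical. The `int&...` barrier above keeps callers from supplying explicit template arguments, so argument types are always deduced.

    #include <cstddef>
    #include <string>
    #include "absl/hash/hash.h"

    inline size_t HashNameAndId(const std::string& name, int id) {
      // Same value as hashing std::make_tuple(name, id) with absl::Hash.
      return absl::HashOf(name, id);
    }
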
 // HashState
 //
 // A type erased version of the hash state concept, for use in user-defined
@@ -221,8 +276,9 @@
 // classes, virtual functions, etc.). The type erasure adds overhead so it
 // should be avoided unless necessary.
 //
-// Note: This wrapper will only erase calls to:
+// Note: This wrapper will only erase calls to
 //     combine_contiguous(H, const unsigned char*, size_t)
+//     RunCombineUnordered(H, CombinerF)
 //
 // All other calls will be handled internally and will not invoke overloads
 // provided by the wrapped class.
@@ -296,6 +352,8 @@
  private:
   HashState() = default;
 
+  friend class HashState::HashStateBase;
+
   template <typename T>
   static void CombineContiguousImpl(void* p, const unsigned char* first,
                                     size_t size) {
@@ -307,16 +365,57 @@
   void Init(T* state) {
     state_ = state;
     combine_contiguous_ = &CombineContiguousImpl<T>;
+    run_combine_unordered_ = &RunCombineUnorderedImpl<T>;
+  }
+
+  template <typename HS>
+  struct CombineUnorderedInvoker {
+    template <typename T, typename ConsumerT>
+    void operator()(T inner_state, ConsumerT inner_cb) {
+      f(HashState::Create(&inner_state),
+        [&](HashState& inner_erased) { inner_cb(inner_erased.Real<T>()); });
+    }
+
+    absl::FunctionRef<void(HS, absl::FunctionRef<void(HS&)>)> f;
+  };
+
+  template <typename T>
+  static HashState RunCombineUnorderedImpl(
+      HashState state,
+      absl::FunctionRef<void(HashState, absl::FunctionRef<void(HashState&)>)>
+          f) {
+    // Note that this implementation assumes that inner_state and outer_state
+    // are the same type.  This isn't true in the SpyHash case, but SpyHash
+    // types are move-convertible to each other, so this still works.
+    T& real_state = state.Real<T>();
+    real_state = T::RunCombineUnordered(
+        std::move(real_state), CombineUnorderedInvoker<HashState>{f});
+    return state;
+  }
+
+  template <typename CombinerT>
+  static HashState RunCombineUnordered(HashState state, CombinerT combiner) {
+    auto* run = state.run_combine_unordered_;
+    return run(std::move(state), std::ref(combiner));
   }
 
   // Do not erase an already erased state.
   void Init(HashState* state) {
     state_ = state->state_;
     combine_contiguous_ = state->combine_contiguous_;
+    run_combine_unordered_ = state->run_combine_unordered_;
+  }
+
+  template <typename T>
+  T& Real() {
+    return *static_cast<T*>(state_);
   }
 
   void* state_;
   void (*combine_contiguous_)(void*, const unsigned char*, size_t);
+  HashState (*run_combine_unordered_)(
+      HashState state,
+      absl::FunctionRef<void(HashState, absl::FunctionRef<void(HashState&)>)>);
 };
 
 ABSL_NAMESPACE_END
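
Illustrative sketch only (not part of the applied patch): the type-erased HashState in use, hiding hashing behind a virtual interface. The `Hashable`/`IntBox` names are hypothetical; the pattern mirrors the TypeErasedInterface helper in the benchmark below.

    #include <utility>
    #include "absl/hash/hash.h"

    class Hashable {
     public:
      virtual ~Hashable() = default;

      template <typename H>
      friend H AbslHashValue(H state, const Hashable& value) {
        // Erase the concrete state type H so the virtual hook below does not
        // have to be a template.
        value.HashValue(absl::HashState::Create(&state));
        return state;
      }

     private:
      virtual void HashValue(absl::HashState state) const = 0;
    };

    class IntBox : public Hashable {
     public:
      explicit IntBox(int v) : v_(v) {}

     private:
      void HashValue(absl::HashState state) const override {
        absl::HashState::combine(std::move(state), v_);
      }
      int v_;
    };
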
diff --git a/abseil-cpp/absl/hash/hash_benchmark.cc b/abseil-cpp/absl/hash/hash_benchmark.cc
new file mode 100644
index 0000000..8712a01
--- /dev/null
+++ b/abseil-cpp/absl/hash/hash_benchmark.cc
@@ -0,0 +1,323 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include <type_traits>
+#include <typeindex>
+#include <utility>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/hash/hash.h"
+#include "absl/random/random.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/cord_test_helpers.h"
+#include "absl/strings/string_view.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+using absl::Hash;
+
+template <template <typename> class H, typename T>
+void RunBenchmark(benchmark::State& state, T value) {
+  H<T> h;
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(value);
+    benchmark::DoNotOptimize(h(value));
+  }
+}
+
+}  // namespace
+
+template <typename T>
+using AbslHash = absl::Hash<T>;
+
+class TypeErasedInterface {
+ public:
+  virtual ~TypeErasedInterface() = default;
+
+  template <typename H>
+  friend H AbslHashValue(H state, const TypeErasedInterface& wrapper) {
+    state = H::combine(std::move(state), std::type_index(typeid(wrapper)));
+    wrapper.HashValue(absl::HashState::Create(&state));
+    return state;
+  }
+
+ private:
+  virtual void HashValue(absl::HashState state) const = 0;
+};
+
+template <typename T>
+struct TypeErasedAbslHash {
+  class Wrapper : public TypeErasedInterface {
+   public:
+    explicit Wrapper(const T& value) : value_(value) {}
+
+   private:
+    void HashValue(absl::HashState state) const override {
+      absl::HashState::combine(std::move(state), value_);
+    }
+
+    const T& value_;
+  };
+
+  size_t operator()(const T& value) {
+    return absl::Hash<Wrapper>{}(Wrapper(value));
+  }
+};
+
+template <typename FuncType>
+inline FuncType* ODRUseFunction(FuncType* ptr) {
+  volatile FuncType* dummy = ptr;
+  return dummy;
+}
+
+absl::Cord FlatCord(size_t size) {
+  absl::Cord result(std::string(size, 'a'));
+  result.Flatten();
+  return result;
+}
+
+absl::Cord FragmentedCord(size_t size) {
+  const size_t orig_size = size;
+  std::vector<std::string> chunks;
+  size_t chunk_size = std::max<size_t>(1, size / 10);
+  while (size > chunk_size) {
+    chunks.push_back(std::string(chunk_size, 'a'));
+    size -= chunk_size;
+  }
+  if (size > 0) {
+    chunks.push_back(std::string(size, 'a'));
+  }
+  absl::Cord result = absl::MakeFragmentedCord(chunks);
+  (void) orig_size;
+  assert(result.size() == orig_size);
+  return result;
+}
+
+template <typename T>
+std::vector<T> Vector(size_t count) {
+  std::vector<T> result;
+  for (size_t v = 0; v < count; ++v) {
+    result.push_back(v);
+  }
+  return result;
+}
+
+// Bogus type that replicates an unordered_set's bit mixing, but with
+// vector-speed iteration. This is intended to measure the overhead of unordered
+// hashing without counting the speed of unordered_set iteration.
+template <typename T>
+struct FastUnorderedSet {
+  explicit FastUnorderedSet(size_t count) {
+    for (size_t v = 0; v < count; ++v) {
+      values.push_back(v);
+    }
+  }
+  std::vector<T> values;
+
+  template <typename H>
+  friend H AbslHashValue(H h, const FastUnorderedSet& fus) {
+    return H::combine(H::combine_unordered(std::move(h), fus.values.begin(),
+                                           fus.values.end()),
+                      fus.values.size());
+  }
+};
+
+template <typename T>
+absl::flat_hash_set<T> FlatHashSet(size_t count) {
+  absl::flat_hash_set<T> result;
+  for (size_t v = 0; v < count; ++v) {
+    result.insert(v);
+  }
+  return result;
+}
+
+// Generates a benchmark and a codegen method for the provided types.  The
+// codegen method provides a well known entrypoint for dumping assembly.
+#define MAKE_BENCHMARK(hash, name, ...)                          \
+  namespace {                                                    \
+  void BM_##hash##_##name(benchmark::State& state) {             \
+    RunBenchmark<hash>(state, __VA_ARGS__);                      \
+  }                                                              \
+  BENCHMARK(BM_##hash##_##name);                                 \
+  }                                                              \
+  size_t Codegen##hash##name(const decltype(__VA_ARGS__)& arg);  \
+  size_t Codegen##hash##name(const decltype(__VA_ARGS__)& arg) { \
+    return hash<decltype(__VA_ARGS__)>{}(arg);                   \
+  }                                                              \
+  bool absl_hash_test_odr_use##hash##name =                      \
+      ODRUseFunction(&Codegen##hash##name);
+
+MAKE_BENCHMARK(AbslHash, Int32, int32_t{});
+MAKE_BENCHMARK(AbslHash, Int64, int64_t{});
+MAKE_BENCHMARK(AbslHash, Double, 1.2);
+MAKE_BENCHMARK(AbslHash, DoubleZero, 0.0);
+MAKE_BENCHMARK(AbslHash, PairInt32Int32, std::pair<int32_t, int32_t>{});
+MAKE_BENCHMARK(AbslHash, PairInt64Int64, std::pair<int64_t, int64_t>{});
+MAKE_BENCHMARK(AbslHash, TupleInt32BoolInt64,
+               std::tuple<int32_t, bool, int64_t>{});
+MAKE_BENCHMARK(AbslHash, String_0, std::string());
+MAKE_BENCHMARK(AbslHash, String_10, std::string(10, 'a'));
+MAKE_BENCHMARK(AbslHash, String_30, std::string(30, 'a'));
+MAKE_BENCHMARK(AbslHash, String_90, std::string(90, 'a'));
+MAKE_BENCHMARK(AbslHash, String_200, std::string(200, 'a'));
+MAKE_BENCHMARK(AbslHash, String_5000, std::string(5000, 'a'));
+MAKE_BENCHMARK(AbslHash, Cord_Flat_0, absl::Cord());
+MAKE_BENCHMARK(AbslHash, Cord_Flat_10, FlatCord(10));
+MAKE_BENCHMARK(AbslHash, Cord_Flat_30, FlatCord(30));
+MAKE_BENCHMARK(AbslHash, Cord_Flat_90, FlatCord(90));
+MAKE_BENCHMARK(AbslHash, Cord_Flat_200, FlatCord(200));
+MAKE_BENCHMARK(AbslHash, Cord_Flat_5000, FlatCord(5000));
+MAKE_BENCHMARK(AbslHash, Cord_Fragmented_200, FragmentedCord(200));
+MAKE_BENCHMARK(AbslHash, Cord_Fragmented_5000, FragmentedCord(5000));
+MAKE_BENCHMARK(AbslHash, VectorInt64_10, Vector<int64_t>(10));
+MAKE_BENCHMARK(AbslHash, VectorInt64_100, Vector<int64_t>(100));
+MAKE_BENCHMARK(AbslHash, VectorInt64_1000, Vector<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, VectorDouble_10, Vector<double>(10));
+MAKE_BENCHMARK(AbslHash, VectorDouble_100, Vector<double>(100));
+MAKE_BENCHMARK(AbslHash, VectorDouble_1000, Vector<double>(1000));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_10, FlatHashSet<int64_t>(10));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_100, FlatHashSet<int64_t>(100));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_1000, FlatHashSet<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_10, FlatHashSet<double>(10));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_100, FlatHashSet<double>(100));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_1000, FlatHashSet<double>(1000));
+MAKE_BENCHMARK(AbslHash, FastUnorderedSetInt64_1000,
+               FastUnorderedSet<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, FastUnorderedSetDouble_1000,
+               FastUnorderedSet<double>(1000));
+MAKE_BENCHMARK(AbslHash, PairStringString_0,
+               std::make_pair(std::string(), std::string()));
+MAKE_BENCHMARK(AbslHash, PairStringString_10,
+               std::make_pair(std::string(10, 'a'), std::string(10, 'b')));
+MAKE_BENCHMARK(AbslHash, PairStringString_30,
+               std::make_pair(std::string(30, 'a'), std::string(30, 'b')));
+MAKE_BENCHMARK(AbslHash, PairStringString_90,
+               std::make_pair(std::string(90, 'a'), std::string(90, 'b')));
+MAKE_BENCHMARK(AbslHash, PairStringString_200,
+               std::make_pair(std::string(200, 'a'), std::string(200, 'b')));
+MAKE_BENCHMARK(AbslHash, PairStringString_5000,
+               std::make_pair(std::string(5000, 'a'), std::string(5000, 'b')));
+
+MAKE_BENCHMARK(TypeErasedAbslHash, Int32, int32_t{});
+MAKE_BENCHMARK(TypeErasedAbslHash, Int64, int64_t{});
+MAKE_BENCHMARK(TypeErasedAbslHash, PairInt32Int32,
+               std::pair<int32_t, int32_t>{});
+MAKE_BENCHMARK(TypeErasedAbslHash, PairInt64Int64,
+               std::pair<int64_t, int64_t>{});
+MAKE_BENCHMARK(TypeErasedAbslHash, TupleInt32BoolInt64,
+               std::tuple<int32_t, bool, int64_t>{});
+MAKE_BENCHMARK(TypeErasedAbslHash, String_0, std::string());
+MAKE_BENCHMARK(TypeErasedAbslHash, String_10, std::string(10, 'a'));
+MAKE_BENCHMARK(TypeErasedAbslHash, String_30, std::string(30, 'a'));
+MAKE_BENCHMARK(TypeErasedAbslHash, String_90, std::string(90, 'a'));
+MAKE_BENCHMARK(TypeErasedAbslHash, String_200, std::string(200, 'a'));
+MAKE_BENCHMARK(TypeErasedAbslHash, String_5000, std::string(5000, 'a'));
+MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_10,
+               std::vector<double>(10, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_100,
+               std::vector<double>(100, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_1000,
+               std::vector<double>(1000, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_10,
+               FlatHashSet<int64_t>(10));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_100,
+               FlatHashSet<int64_t>(100));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_1000,
+               FlatHashSet<int64_t>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_10,
+               FlatHashSet<double>(10));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_100,
+               FlatHashSet<double>(100));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_1000,
+               FlatHashSet<double>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetInt64_1000,
+               FastUnorderedSet<int64_t>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetDouble_1000,
+               FastUnorderedSet<double>(1000));
+
+// The latency benchmark attempts to model the speed of the hash function in
+// production. When a hash function is used for hashtable lookups, it is rarely
+// used to hash N items in a tight loop or on constant-sized strings. Instead,
+// after hashing there is a potential equality test plus a (usually) large
+// amount of user code. To simulate this effectively we introduce a data
+// dependency between elements we hash by using the hash of the Nth element as
+// the selector of the N+1th element to hash. This isolates the hash function
+// code much like in production. As a bonus we use the hash to generate strings
+// of size [1,N] (instead of fixed N) to disable perfect branch predictions in
+// hash function implementations.
+namespace {
+// 16kb fits in L1 cache of most CPUs we care about. Keeping memory latency low
+// will allow us to attribute most of the time to the CPU, which means more
+// accurate measurements.
+static constexpr size_t kEntropySize = 16 << 10;
+static char entropy[kEntropySize + 1024];
+ABSL_ATTRIBUTE_UNUSED static const bool kInitialized = [] {
+  absl::BitGen gen;
+  static_assert(sizeof(entropy) % sizeof(uint64_t) == 0, "");
+  for (int i = 0; i != sizeof(entropy); i += sizeof(uint64_t)) {
+    auto rand = absl::Uniform<uint64_t>(gen);
+    memcpy(&entropy[i], &rand, sizeof(uint64_t));
+  }
+  return true;
+}();
+}  // namespace
+
+template <class T>
+struct PodRand {
+  static_assert(std::is_pod<T>::value, "");
+  static_assert(kEntropySize + sizeof(T) < sizeof(entropy), "");
+
+  T Get(size_t i) const {
+    T v;
+    memcpy(&v, &entropy[i % kEntropySize], sizeof(T));
+    return v;
+  }
+};
+
+template <size_t N>
+struct StringRand {
+  static_assert(kEntropySize + N < sizeof(entropy), "");
+
+  absl::string_view Get(size_t i) const {
+    // This has a small bias towards small numbers, but because max N is ~200
+    // the bias is tiny and we prefer speed over absolute accuracy. We also
+    // pass N = 2^K+1 so that the mod reduces to a bit-and.
+    size_t s = (i % (N - 1)) + 1;
+    return {&entropy[i % kEntropySize], s};
+  }
+};
+
+#define MAKE_LATENCY_BENCHMARK(hash, name, ...)              \
+  namespace {                                                \
+  void BM_latency_##hash##_##name(benchmark::State& state) { \
+    __VA_ARGS__ r;                                           \
+    hash<decltype(r.Get(0))> h;                              \
+    size_t i = 871401241;                                    \
+    for (auto _ : state) {                                   \
+      benchmark::DoNotOptimize(i = h(r.Get(i)));             \
+    }                                                        \
+  }                                                          \
+  BENCHMARK(BM_latency_##hash##_##name);                     \
+  }  // namespace
+
+MAKE_LATENCY_BENCHMARK(AbslHash, Int32, PodRand<int32_t>);
+MAKE_LATENCY_BENCHMARK(AbslHash, Int64, PodRand<int64_t>);
+MAKE_LATENCY_BENCHMARK(AbslHash, String9, StringRand<9>);
+MAKE_LATENCY_BENCHMARK(AbslHash, String33, StringRand<33>);
+MAKE_LATENCY_BENCHMARK(AbslHash, String65, StringRand<65>);
+MAKE_LATENCY_BENCHMARK(AbslHash, String257, StringRand<257>);
diff --git a/abseil-cpp/absl/hash/hash_instantiated_test.cc b/abseil-cpp/absl/hash/hash_instantiated_test.cc
new file mode 100644
index 0000000..e65de9c
--- /dev/null
+++ b/abseil-cpp/absl/hash/hash_instantiated_test.cc
@@ -0,0 +1,224 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains a few select absl::Hash tests that, due to their reliance
+// on INSTANTIATE_TYPED_TEST_SUITE_P, require a large amount of memory to
+// compile. Put new tests in hash_test.cc, not this file.
+
+#include "absl/hash/hash.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <deque>
+#include <forward_list>
+#include <initializer_list>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/hash/internal/hash_test.h"
+
+namespace {
+
+using ::absl::hash_test_internal::is_hashable;
+using ::absl::hash_test_internal::TypeErasedContainer;
+
+// Dummy type with unordered equality and hashing semantics.  This preserves
+// input order internally, and is used below to ensure we get test coverage
+// for equal sequences with different iteration orders.
+template <typename T>
+class UnorderedSequence {
+ public:
+  UnorderedSequence() = default;
+  template <typename TT>
+  UnorderedSequence(std::initializer_list<TT> l)
+      : values_(l.begin(), l.end()) {}
+  template <typename ForwardIterator,
+            typename std::enable_if<!std::is_integral<ForwardIterator>::value,
+                                    bool>::type = true>
+  UnorderedSequence(ForwardIterator begin, ForwardIterator end)
+      : values_(begin, end) {}
+  // one-argument constructor of value type T, to appease older toolchains that
+  // get confused by one-element initializer lists in some contexts
+  explicit UnorderedSequence(const T& v) : values_(&v, &v + 1) {}
+
+  using value_type = T;
+
+  size_t size() const { return values_.size(); }
+  typename std::vector<T>::const_iterator begin() const {
+    return values_.begin();
+  }
+  typename std::vector<T>::const_iterator end() const { return values_.end(); }
+
+  friend bool operator==(const UnorderedSequence& lhs,
+                         const UnorderedSequence& rhs) {
+    return lhs.size() == rhs.size() &&
+           std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
+  }
+  friend bool operator!=(const UnorderedSequence& lhs,
+                         const UnorderedSequence& rhs) {
+    return !(lhs == rhs);
+  }
+  template <typename H>
+  friend H AbslHashValue(H h, const UnorderedSequence& u) {
+    return H::combine(H::combine_unordered(std::move(h), u.begin(), u.end()),
+                      u.size());
+  }
+
+ private:
+  std::vector<T> values_;
+};
+
+template <typename T>
+class HashValueSequenceTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueSequenceTest);
+
+TYPED_TEST_P(HashValueSequenceTest, BasicUsage) {
+  EXPECT_TRUE((is_hashable<TypeParam>::value));
+
+  using IntType = typename TypeParam::value_type;
+  auto a = static_cast<IntType>(0);
+  auto b = static_cast<IntType>(23);
+  auto c = static_cast<IntType>(42);
+
+  std::vector<TypeParam> exemplars = {
+      TypeParam(),        TypeParam(),        TypeParam{a, b, c},
+      TypeParam{a, c, b}, TypeParam{c, a, b}, TypeParam{a},
+      TypeParam{a, a},    TypeParam{a, a, a}, TypeParam{a, a, b},
+      TypeParam{a, b, a}, TypeParam{b, a, a}, TypeParam{a, b},
+      TypeParam{b, c}};
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueSequenceTest, BasicUsage);
+using IntSequenceTypes = testing::Types<
+    std::deque<int>, std::forward_list<int>, std::list<int>, std::vector<int>,
+    std::vector<bool>, TypeErasedContainer<std::vector<int>>, std::set<int>,
+    std::multiset<int>, UnorderedSequence<int>,
+    TypeErasedContainer<UnorderedSequence<int>>, std::unordered_set<int>,
+    std::unordered_multiset<int>, absl::flat_hash_set<int>,
+    absl::node_hash_set<int>, absl::btree_set<int>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueSequenceTest, IntSequenceTypes);
+
+template <typename T>
+class HashValueNestedSequenceTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueNestedSequenceTest);
+
+TYPED_TEST_P(HashValueNestedSequenceTest, BasicUsage) {
+  using T = TypeParam;
+  using V = typename T::value_type;
+  std::vector<T> exemplars = {
+      // empty case
+      T{},
+      // sets of empty sets
+      T{V{}}, T{V{}, V{}}, T{V{}, V{}, V{}},
+      // multisets of different values
+      T{V{1}}, T{V{1, 1}, V{1, 1}}, T{V{1, 1, 1}, V{1, 1, 1}, V{1, 1, 1}},
+      // various orderings of same nested sets
+      T{V{}, V{1, 2}}, T{V{}, V{2, 1}}, T{V{1, 2}, V{}}, T{V{2, 1}, V{}},
+      // various orderings of various nested sets, case 2
+      T{V{1, 2}, V{3, 4}}, T{V{1, 2}, V{4, 3}}, T{V{1, 3}, V{2, 4}},
+      T{V{1, 3}, V{4, 2}}, T{V{1, 4}, V{2, 3}}, T{V{1, 4}, V{3, 2}},
+      T{V{2, 3}, V{1, 4}}, T{V{2, 3}, V{4, 1}}, T{V{2, 4}, V{1, 3}},
+      T{V{2, 4}, V{3, 1}}, T{V{3, 4}, V{1, 2}}, T{V{3, 4}, V{2, 1}}};
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueNestedSequenceTest, BasicUsage);
+template <typename T>
+using TypeErasedSet = TypeErasedContainer<UnorderedSequence<T>>;
+
+using NestedIntSequenceTypes = testing::Types<
+    std::vector<std::vector<int>>, std::vector<UnorderedSequence<int>>,
+    std::vector<TypeErasedSet<int>>, UnorderedSequence<std::vector<int>>,
+    UnorderedSequence<UnorderedSequence<int>>,
+    UnorderedSequence<TypeErasedSet<int>>, TypeErasedSet<std::vector<int>>,
+    TypeErasedSet<UnorderedSequence<int>>, TypeErasedSet<TypeErasedSet<int>>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueNestedSequenceTest,
+                               NestedIntSequenceTypes);
+
+template <typename T>
+class HashValueAssociativeMapTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueAssociativeMapTest);
+
+TYPED_TEST_P(HashValueAssociativeMapTest, BasicUsage) {
+  using M = TypeParam;
+  using V = typename M::value_type;
+  std::vector<M> exemplars{M{},
+                           M{V{0, "foo"}},
+                           M{V{1, "foo"}},
+                           M{V{0, "bar"}},
+                           M{V{1, "bar"}},
+                           M{V{0, "foo"}, V{42, "bar"}},
+                           M{V{42, "bar"}, V{0, "foo"}},
+                           M{V{1, "foo"}, V{42, "bar"}},
+                           M{V{1, "foo"}, V{43, "bar"}},
+                           M{V{1, "foo"}, V{43, "baz"}}};
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMapTest, BasicUsage);
+using AssociativeMapTypes = testing::Types<
+    std::map<int, std::string>, std::unordered_map<int, std::string>,
+    absl::flat_hash_map<int, std::string>,
+    absl::node_hash_map<int, std::string>, absl::btree_map<int, std::string>,
+    UnorderedSequence<std::pair<const int, std::string>>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMapTest,
+                               AssociativeMapTypes);
+
+template <typename T>
+class HashValueAssociativeMultimapTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest);
+
+TYPED_TEST_P(HashValueAssociativeMultimapTest, BasicUsage) {
+  using MM = TypeParam;
+  using V = typename MM::value_type;
+  std::vector<MM> exemplars{MM{},
+                            MM{V{0, "foo"}},
+                            MM{V{1, "foo"}},
+                            MM{V{0, "bar"}},
+                            MM{V{1, "bar"}},
+                            MM{V{0, "foo"}, V{0, "bar"}},
+                            MM{V{0, "bar"}, V{0, "foo"}},
+                            MM{V{0, "foo"}, V{42, "bar"}},
+                            MM{V{1, "foo"}, V{42, "bar"}},
+                            MM{V{1, "foo"}, V{1, "foo"}, V{43, "bar"}},
+                            MM{V{1, "foo"}, V{43, "bar"}, V{1, "foo"}},
+                            MM{V{1, "foo"}, V{43, "baz"}}};
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest, BasicUsage);
+using AssociativeMultimapTypes =
+    testing::Types<std::multimap<int, std::string>,
+                   std::unordered_multimap<int, std::string>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMultimapTest,
+                               AssociativeMultimapTypes);
+
+}  // namespace
diff --git a/abseil-cpp/absl/hash/hash_test.cc b/abseil-cpp/absl/hash/hash_test.cc
index 39ba24a..a0e2e4a 100644
--- a/abseil-cpp/absl/hash/hash_test.cc
+++ b/abseil-cpp/absl/hash/hash_test.cc
@@ -14,12 +14,15 @@
 
 #include "absl/hash/hash.h"
 
+#include <algorithm>
 #include <array>
 #include <bitset>
+#include <cstdint>
 #include <cstring>
 #include <deque>
 #include <forward_list>
 #include <functional>
+#include <initializer_list>
 #include <iterator>
 #include <limits>
 #include <list>
@@ -32,20 +35,38 @@
 #include <tuple>
 #include <type_traits>
 #include <unordered_map>
+#include <unordered_set>
 #include <utility>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_map.h"
 #include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
 #include "absl/hash/hash_testing.h"
+#include "absl/hash/internal/hash_test.h"
 #include "absl/hash/internal/spy_hash_state.h"
 #include "absl/meta/type_traits.h"
 #include "absl/numeric/int128.h"
 #include "absl/strings/cord_test_helpers.h"
 
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
 namespace {
 
+using ::absl::hash_test_internal::is_hashable;
+using ::absl::hash_test_internal::TypeErasedContainer;
+using ::absl::hash_test_internal::TypeErasedValue;
+
+template <typename T>
+using TypeErasedVector = TypeErasedContainer<std::vector<T>>;
+
 using absl::Hash;
 using absl::hash_internal::SpyHashState;
 
@@ -59,11 +80,6 @@
   return SpyHashState::combine(SpyHashState(), value);
 }
 
-// Helper trait to verify if T is hashable. We use absl::Hash's poison status to
-// detect it.
-template <typename T>
-using is_hashable = std::is_default_constructible<absl::Hash<T>>;
-
 TYPED_TEST_P(HashValueIntTest, BasicUsage) {
   EXPECT_TRUE((is_hashable<TypeParam>::value));
 
@@ -81,10 +97,10 @@
             absl::Hash<std::tuple<TypeParam>>{}(std::tuple<TypeParam>(n)));
 }
 
-REGISTER_TYPED_TEST_CASE_P(HashValueIntTest, BasicUsage, FastPath);
-using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t, uint32_t,
-                                uint64_t, size_t>;
-INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueIntTest, IntTypes);
+REGISTER_TYPED_TEST_SUITE_P(HashValueIntTest, BasicUsage, FastPath);
+using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
+                                uint32_t, uint64_t, size_t>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueIntTest, IntTypes);
 
 enum LegacyEnum { kValue1, kValue2, kValue3 };
 
@@ -127,6 +143,8 @@
 
 TEST(HashValueTest, Pointer) {
   EXPECT_TRUE((is_hashable<int*>::value));
+  EXPECT_TRUE((is_hashable<int(*)(char, float)>::value));
+  EXPECT_TRUE((is_hashable<void(*)(int, int, ...)>::value));
 
   int i;
   int* ptr = &i;
@@ -162,10 +180,89 @@
     // Limit the scope to the bits we would be using for Swisstable.
     constexpr size_t kMask = (1 << (kLog2NumValues + 7)) - 1;
     size_t stuck_bits = (~bits_or | bits_and) & kMask;
-    EXPECT_EQ(stuck_bits, 0) << "0x" << std::hex << stuck_bits;
+    EXPECT_EQ(stuck_bits, 0u) << "0x" << std::hex << stuck_bits;
   }
 }
 
+TEST(HashValueTest, PointerToMember) {
+  struct Bass {
+    void q() {}
+  };
+
+  struct A : Bass {
+    virtual ~A() = default;
+    virtual void vfa() {}
+
+    static auto pq() -> void (A::*)() { return &A::q; }
+  };
+
+  struct B : Bass {
+    virtual ~B() = default;
+    virtual void vfb() {}
+
+    static auto pq() -> void (B::*)() { return &B::q; }
+  };
+
+  struct Foo : A, B {
+    void f1() {}
+    void f2() const {}
+
+    int g1() & { return 0; }
+    int g2() const & { return 0; }
+    int g3() && { return 0; }
+    int g4() const && { return 0; }
+
+    int h1() & { return 0; }
+    int h2() const & { return 0; }
+    int h3() && { return 0; }
+    int h4() const && { return 0; }
+
+    int a;
+    int b;
+
+    const int c = 11;
+    const int d = 22;
+  };
+
+  EXPECT_TRUE((is_hashable<float Foo::*>::value));
+  EXPECT_TRUE((is_hashable<double (Foo::*)(int, int)&&>::value));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+      std::make_tuple(&Foo::a, &Foo::b, static_cast<int Foo::*>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+      std::make_tuple(&Foo::c, &Foo::d, static_cast<const int Foo::*>(nullptr),
+                      &Foo::a, &Foo::b)));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      &Foo::f1, static_cast<void (Foo::*)()>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      &Foo::f2, static_cast<void (Foo::*)() const>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      &Foo::g1, &Foo::h1, static_cast<int (Foo::*)() &>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      &Foo::g2, &Foo::h2, static_cast<int (Foo::*)() const &>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      &Foo::g3, &Foo::h3, static_cast<int (Foo::*)() &&>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      &Foo::g4, &Foo::h4, static_cast<int (Foo::*)() const &&>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+      std::make_tuple(static_cast<void (Foo::*)()>(&Foo::vfa),
+                      static_cast<void (Foo::*)()>(&Foo::vfb),
+                      static_cast<void (Foo::*)()>(nullptr))));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+      std::make_tuple(static_cast<void (Foo::*)()>(Foo::A::pq()),
+                      static_cast<void (Foo::*)()>(Foo::B::pq()),
+                      static_cast<void (Foo::*)()>(nullptr))));
+}
+
 TEST(HashValueTest, PairAndTuple) {
   EXPECT_TRUE((is_hashable<std::pair<int, int>>::value));
   EXPECT_TRUE((is_hashable<std::pair<const int&, const int&>>::value));
@@ -348,6 +445,47 @@
       std::u32string(U"Iñtërnâtiônàlizætiøn"))));
 }
 
+TEST(HashValueTest, WStringView) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  EXPECT_TRUE((is_hashable<std::wstring_view>::value));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+      std::wstring_view(), std::wstring_view(L"ABC"), std::wstring_view(L"ABC"),
+      std::wstring_view(L"Some other different string_view"),
+      std::wstring_view(L"Iñtërnâtiônàlizætiøn"))));
+#endif
+}
+
+TEST(HashValueTest, U16StringView) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  EXPECT_TRUE((is_hashable<std::u16string_view>::value));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+      std::make_tuple(std::u16string_view(), std::u16string_view(u"ABC"),
+                      std::u16string_view(u"ABC"),
+                      std::u16string_view(u"Some other different string_view"),
+                      std::u16string_view(u"Iñtërnâtiônàlizætiøn"))));
+#endif
+}
+
+TEST(HashValueTest, U32StringView) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+  GTEST_SKIP();
+#else
+  EXPECT_TRUE((is_hashable<std::u32string_view>::value));
+
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+      std::make_tuple(std::u32string_view(), std::u32string_view(U"ABC"),
+                      std::u32string_view(U"ABC"),
+                      std::u32string_view(U"Some other different string_view"),
+                      std::u32string_view(U"Iñtërnâtiônàlizætiøn"))));
+#endif
+}
+
 TEST(HashValueTest, StdArray) {
   EXPECT_TRUE((is_hashable<std::array<int, 3>>::value));
 
@@ -381,31 +519,6 @@
        std::bitset<kNumBits>(bit_strings[5].c_str())}));
 }  // namespace
 
-template <typename T>
-class HashValueSequenceTest : public testing::Test {
-};
-TYPED_TEST_SUITE_P(HashValueSequenceTest);
-
-TYPED_TEST_P(HashValueSequenceTest, BasicUsage) {
-  EXPECT_TRUE((is_hashable<TypeParam>::value));
-
-  using ValueType = typename TypeParam::value_type;
-  auto a = static_cast<ValueType>(0);
-  auto b = static_cast<ValueType>(23);
-  auto c = static_cast<ValueType>(42);
-
-  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
-      std::make_tuple(TypeParam(), TypeParam{}, TypeParam{a, b, c},
-                      TypeParam{a, b}, TypeParam{b, c})));
-}
-
-REGISTER_TYPED_TEST_CASE_P(HashValueSequenceTest, BasicUsage);
-using IntSequenceTypes =
-    testing::Types<std::deque<int>, std::forward_list<int>, std::list<int>,
-                   std::vector<int>, std::vector<bool>, std::set<int>,
-                   std::multiset<int>>;
-INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueSequenceTest, IntSequenceTypes);
-
 // Private type that only supports AbslHashValue to make sure our chosen hash
 // implementation is recursive within absl::Hash.
 // It uses std::abs() on the value to provide different bitwise representations
@@ -508,10 +621,10 @@
   //
   // This test is run on a buffer that is a multiple of the stride size, and one
   // that isn't.
-  for (size_t big_buffer_size : {1024 * 2 + 512, 1024 * 3}) {
+  for (size_t big_buffer_size : {1024u * 2 + 512u, 1024u * 3}) {
     SCOPED_TRACE(big_buffer_size);
     std::string big_buffer;
-    for (int i = 0; i < big_buffer_size; ++i) {
+    for (size_t i = 0; i < big_buffer_size; ++i) {
       // Arbitrary string
       big_buffer.push_back(32 + (i * (i / 3)) % 64);
     }
@@ -564,23 +677,6 @@
 #endif
 }
 
-TEST(HashValueTest, Maps) {
-  EXPECT_TRUE((is_hashable<std::map<int, std::string>>::value));
-
-  using M = std::map<int, std::string>;
-  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
-      M{}, M{{0, "foo"}}, M{{1, "foo"}}, M{{0, "bar"}}, M{{1, "bar"}},
-      M{{0, "foo"}, {42, "bar"}}, M{{1, "foo"}, {42, "bar"}},
-      M{{1, "foo"}, {43, "bar"}}, M{{1, "foo"}, {43, "baz"}})));
-
-  using MM = std::multimap<int, std::string>;
-  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
-      MM{}, MM{{0, "foo"}}, MM{{1, "foo"}}, MM{{0, "bar"}}, MM{{1, "bar"}},
-      MM{{0, "foo"}, {0, "bar"}}, MM{{0, "bar"}, {0, "foo"}},
-      MM{{0, "foo"}, {42, "bar"}}, MM{{1, "foo"}, {42, "bar"}},
-      MM{{1, "foo"}, {1, "foo"}, {43, "bar"}}, MM{{1, "foo"}, {43, "baz"}})));
-}
-
 TEST(HashValueTest, ReferenceWrapper) {
   EXPECT_TRUE(is_hashable<std::reference_wrapper<Private>>::value);
 
@@ -634,8 +730,8 @@
   EXPECT_FALSE(absl::is_copy_assignable<absl::Hash<X>>::value);
   EXPECT_FALSE(absl::is_move_assignable<absl::Hash<X>>::value);
   EXPECT_FALSE(IsHashCallable<X>::value);
-#if !defined(__GNUC__) || __GNUC__ < 9
-  // This doesn't compile on GCC 9.
+#if !defined(__GNUC__) || defined(__clang__)
+  // TODO(b/144368551): As of GCC 8.4 this does not compile.
   EXPECT_FALSE(IsAggregateInitializable<absl::Hash<X>>::value);
 #endif
 }
@@ -818,10 +914,10 @@
             Hash<CombineVariadic<TypeParam>>()({}));
 }
 
-REGISTER_TYPED_TEST_CASE_P(HashIntTest, BasicUsage);
-using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t, uint32_t,
-                                uint64_t, size_t>;
-INSTANTIATE_TYPED_TEST_CASE_P(My, HashIntTest, IntTypes);
+REGISTER_TYPED_TEST_SUITE_P(HashIntTest, BasicUsage);
+using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
+                                uint32_t, uint64_t, size_t>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashIntTest, IntTypes);
 
 struct StructWithPadding {
   char c;
@@ -865,10 +961,10 @@
   unsigned char buffer2[kNumStructs * sizeof(StructWithPadding)];
   std::memset(buffer2, 255, sizeof(buffer2));
   auto* s2 = reinterpret_cast<StructWithPadding*>(buffer2);
-  for (int i = 0; i < kNumStructs; ++i) {
+  for (size_t i = 0; i < kNumStructs; ++i) {
     SCOPED_TRACE(i);
-    s1[i].c = s2[i].c = '0' + i;
-    s1[i].i = s2[i].i = i;
+    s1[i].c = s2[i].c = static_cast<char>('0' + i);
+    s1[i].i = s2[i].i = static_cast<int>(i);
     ASSERT_FALSE(memcmp(buffer1 + i * sizeof(StructWithPadding),
                         buffer2 + i * sizeof(StructWithPadding),
                         sizeof(StructWithPadding)) == 0)
@@ -928,29 +1024,23 @@
   Hash<IntAndString>()(IntAndString{0, std::string(63, '0')});
 }
 
-struct TypeErased {
-  size_t n;
-
-  template <typename H>
-  friend H AbslHashValue(H hash_state, const TypeErased& v) {
-    v.HashValue(absl::HashState::Create(&hash_state));
-    return hash_state;
-  }
-
-  void HashValue(absl::HashState state) const {
-    absl::HashState::combine(std::move(state), n);
-  }
-};
-
 TEST(HashTest, TypeErased) {
-  EXPECT_TRUE((is_hashable<TypeErased>::value));
-  EXPECT_TRUE((is_hashable<std::pair<TypeErased, int>>::value));
+  EXPECT_TRUE((is_hashable<TypeErasedValue<size_t>>::value));
+  EXPECT_TRUE((is_hashable<std::pair<TypeErasedValue<size_t>, int>>::value));
 
-  EXPECT_EQ(SpyHash(TypeErased{7}), SpyHash(size_t{7}));
-  EXPECT_NE(SpyHash(TypeErased{7}), SpyHash(size_t{13}));
+  EXPECT_EQ(SpyHash(TypeErasedValue<size_t>(7)), SpyHash(size_t{7}));
+  EXPECT_NE(SpyHash(TypeErasedValue<size_t>(7)), SpyHash(size_t{13}));
 
-  EXPECT_EQ(SpyHash(std::make_pair(TypeErased{7}, 17)),
+  EXPECT_EQ(SpyHash(std::make_pair(TypeErasedValue<size_t>(7), 17)),
             SpyHash(std::make_pair(size_t{7}, 17)));
+
+  absl::flat_hash_set<absl::flat_hash_set<int>> ss = {{1, 2}, {3, 4}};
+  TypeErasedContainer<absl::flat_hash_set<absl::flat_hash_set<int>>> es = {
+      absl::flat_hash_set<int>{1, 2}, {3, 4}};
+  absl::flat_hash_set<TypeErasedContainer<absl::flat_hash_set<int>>> se = {
+      {1, 2}, {3, 4}};
+  EXPECT_EQ(SpyHash(ss), SpyHash(es));
+  EXPECT_EQ(SpyHash(ss), SpyHash(se));
 }
 
 struct ValueWithBoolConversion {
@@ -962,7 +1052,9 @@
 namespace std {
 template <>
 struct hash<ValueWithBoolConversion> {
-  size_t operator()(ValueWithBoolConversion v) { return v.i; }
+  size_t operator()(ValueWithBoolConversion v) {
+    return static_cast<size_t>(v.i);
+  }
 };
 }  // namespace std
 
@@ -973,4 +1065,49 @@
             absl::Hash<ValueWithBoolConversion>()(ValueWithBoolConversion{1}));
 }
 
+TEST(HashOf, MatchesHashForSingleArgument) {
+  std::string s = "forty two";
+  double d = 42.0;
+  std::tuple<int, int> t{4, 2};
+  int i = 42;
+  int neg_i = -42;
+  int16_t i16 = 42;
+  int16_t neg_i16 = -42;
+  int8_t i8 = 42;
+  int8_t neg_i8 = -42;
+
+  EXPECT_EQ(absl::HashOf(s), absl::Hash<std::string>{}(s));
+  EXPECT_EQ(absl::HashOf(d), absl::Hash<double>{}(d));
+  EXPECT_EQ(absl::HashOf(t), (absl::Hash<std::tuple<int, int>>{}(t)));
+  EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i));
+  EXPECT_EQ(absl::HashOf(neg_i), absl::Hash<int>{}(neg_i));
+  EXPECT_EQ(absl::HashOf(i16), absl::Hash<int16_t>{}(i16));
+  EXPECT_EQ(absl::HashOf(neg_i16), absl::Hash<int16_t>{}(neg_i16));
+  EXPECT_EQ(absl::HashOf(i8), absl::Hash<int8_t>{}(i8));
+  EXPECT_EQ(absl::HashOf(neg_i8), absl::Hash<int8_t>{}(neg_i8));
+}
+
+TEST(HashOf, MatchesHashOfTupleForMultipleArguments) {
+  std::string hello = "hello";
+  std::string world = "world";
+
+  EXPECT_EQ(absl::HashOf(), absl::HashOf(std::make_tuple()));
+  EXPECT_EQ(absl::HashOf(hello), absl::HashOf(std::make_tuple(hello)));
+  EXPECT_EQ(absl::HashOf(hello, world),
+            absl::HashOf(std::make_tuple(hello, world)));
+}
+
+template <typename T>
+std::true_type HashOfExplicitParameter(decltype(absl::HashOf<T>(0))) {
+  return {};
+}
+template <typename T>
+std::false_type HashOfExplicitParameter(size_t) {
+  return {};
+}
+
+TEST(HashOf, CantPassExplicitTemplateParameters) {
+  EXPECT_FALSE(HashOfExplicitParameter<int>(0));
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/hash/internal/city.cc b/abseil-cpp/absl/hash/internal/city.cc
index e122c18..f0d3196 100644
--- a/abseil-cpp/absl/hash/internal/city.cc
+++ b/abseil-cpp/absl/hash/internal/city.cc
@@ -97,7 +97,7 @@
   uint32_t d = Fetch32(s + (len >> 1));
   uint32_t e = Fetch32(s);
   uint32_t f = Fetch32(s + len - 4);
-  uint32_t h = len;
+  uint32_t h = static_cast<uint32_t>(len);
 
   return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
 }
@@ -106,15 +106,15 @@
   uint32_t b = 0;
   uint32_t c = 9;
   for (size_t i = 0; i < len; i++) {
-    signed char v = s[i];
-    b = b * c1 + v;
+    signed char v = static_cast<signed char>(s[i]);
+    b = b * c1 + static_cast<uint32_t>(v);
     c ^= b;
   }
-  return fmix(Mur(b, Mur(len, c)));
+  return fmix(Mur(b, Mur(static_cast<uint32_t>(len), c)));
 }
 
 static uint32_t Hash32Len5to12(const char *s, size_t len) {
-  uint32_t a = len, b = len * 5, c = 9, d = b;
+  uint32_t a = static_cast<uint32_t>(len), b = a * 5, c = 9, d = b;
   a += Fetch32(s);
   b += Fetch32(s + len - 4);
   c += Fetch32(s + ((len >> 1) & 4));
@@ -129,7 +129,7 @@
   }
 
   // len > 24
-  uint32_t h = len, g = c1 * len, f = g;
+  uint32_t h = static_cast<uint32_t>(len), g = c1 * h, f = g;
 
   uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
   uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
@@ -200,10 +200,6 @@
 
 static uint64_t ShiftMix(uint64_t val) { return val ^ (val >> 47); }
 
-static uint64_t HashLen16(uint64_t u, uint64_t v) {
-  return Hash128to64(uint128(u, v));
-}
-
 static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
   // Murmur-inspired hashing.
   uint64_t a = (u ^ v) * mul;
@@ -214,6 +210,11 @@
   return b;
 }
 
+static uint64_t HashLen16(uint64_t u, uint64_t v) {
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  return HashLen16(u, v, kMul);
+}
+
 static uint64_t HashLen0to16(const char *s, size_t len) {
   if (len >= 8) {
     uint64_t mul = k2 + len * 2;
@@ -229,11 +230,11 @@
     return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
   }
   if (len > 0) {
-    uint8_t a = s[0];
-    uint8_t b = s[len >> 1];
-    uint8_t c = s[len - 1];
+    uint8_t a = static_cast<uint8_t>(s[0]);
+    uint8_t b = static_cast<uint8_t>(s[len >> 1]);
+    uint8_t c = static_cast<uint8_t>(s[len - 1]);
     uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
-    uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+    uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
     return ShiftMix(y * k2 ^ z * k0) * k2;
   }
   return k2;
@@ -253,9 +254,8 @@
 
 // Return a 16-byte hash for 48 bytes.  Quick and dirty.
 // Callers do best to use "random-looking" values for a and b.
-static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(uint64_t w, uint64_t x,
-                                                        uint64_t y, uint64_t z,
-                                                        uint64_t a, uint64_t b) {
+static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(
+    uint64_t w, uint64_t x, uint64_t y, uint64_t z, uint64_t a, uint64_t b) {
   a += w;
   b = Rotate(b + a + z, 21);
   uint64_t c = a;
@@ -266,8 +266,9 @@
 }
 
 // Return a 16-byte hash for s[0] ... s[31], a, and b.  Quick and dirty.
-static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s, uint64_t a,
-                                                        uint64_t b) {
+static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s,
+                                                            uint64_t a,
+                                                            uint64_t b) {
   return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16),
                                 Fetch64(s + 24), a, b);
 }
@@ -310,8 +311,10 @@
   uint64_t x = Fetch64(s + len - 40);
   uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
   uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
-  std::pair<uint64_t, uint64_t> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
-  std::pair<uint64_t, uint64_t> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
+  std::pair<uint64_t, uint64_t> v =
+      WeakHashLen32WithSeeds(s + len - 64, len, z);
+  std::pair<uint64_t, uint64_t> w =
+      WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
   x = x * k1 + Fetch64(s);
 
   // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
@@ -337,7 +340,7 @@
 }
 
 uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
-                           uint64_t seed1) {
+                             uint64_t seed1) {
   return HashLen16(CityHash64(s, len) - seed0, seed1);
 }
 
diff --git a/abseil-cpp/absl/hash/internal/city.h b/abseil-cpp/absl/hash/internal/city.h
index 161c774..393da0b 100644
--- a/abseil-cpp/absl/hash/internal/city.h
+++ b/abseil-cpp/absl/hash/internal/city.h
@@ -56,11 +56,6 @@
 ABSL_NAMESPACE_BEGIN
 namespace hash_internal {
 
-typedef std::pair<uint64_t, uint64_t> uint128;
-
-inline uint64_t Uint128Low64(const uint128 &x) { return x.first; }
-inline uint64_t Uint128High64(const uint128 &x) { return x.second; }
-
 // Hash function for a byte array.
 uint64_t CityHash64(const char *s, size_t len);
 
@@ -71,24 +66,11 @@
 // Hash function for a byte array.  For convenience, two seeds are also
 // hashed into the result.
 uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
-                           uint64_t seed1);
+                             uint64_t seed1);
 
 // Hash function for a byte array.  Most useful in 32-bit binaries.
 uint32_t CityHash32(const char *s, size_t len);
 
-// Hash 128 input bits down to 64 bits of output.
-// This is intended to be a reasonably good hash function.
-inline uint64_t Hash128to64(const uint128 &x) {
-  // Murmur-inspired hashing.
-  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
-  uint64_t a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
-  a ^= (a >> 47);
-  uint64_t b = (Uint128High64(x) ^ a) * kMul;
-  b ^= (b >> 47);
-  b *= kMul;
-  return b;
-}
-
 }  // namespace hash_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/hash/internal/city_test.cc b/abseil-cpp/absl/hash/internal/city_test.cc
index 251d381..1bbf02e 100644
--- a/abseil-cpp/absl/hash/internal/city_test.cc
+++ b/abseil-cpp/absl/hash/internal/city_test.cc
@@ -22,6 +22,7 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace hash_internal {
+namespace {
 
 static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
 static const uint64_t kSeed0 = 1234567;
@@ -590,6 +591,7 @@
   TestUnchanging(testdata[i], 0, kDataSize);
 }
 
+}  // namespace
 }  // namespace hash_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/hash/internal/hash.cc b/abseil-cpp/absl/hash/internal/hash.cc
index b44ecb3..11451e5 100644
--- a/abseil-cpp/absl/hash/internal/hash.cc
+++ b/abseil-cpp/absl/hash/internal/hash.cc
@@ -18,13 +18,12 @@
 ABSL_NAMESPACE_BEGIN
 namespace hash_internal {
 
-uint64_t CityHashState::CombineLargeContiguousImpl32(uint64_t state,
-                                                     const unsigned char* first,
-                                                     size_t len) {
+uint64_t MixingHashState::CombineLargeContiguousImpl32(
+    uint64_t state, const unsigned char* first, size_t len) {
   while (len >= PiecewiseChunkSize()) {
-    state =
-        Mix(state, absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first),
-                                         PiecewiseChunkSize()));
+    state = Mix(state,
+                hash_internal::CityHash32(reinterpret_cast<const char*>(first),
+                                          PiecewiseChunkSize()));
     len -= PiecewiseChunkSize();
     first += PiecewiseChunkSize();
   }
@@ -33,13 +32,10 @@
                                std::integral_constant<int, 4>{});
 }
 
-uint64_t CityHashState::CombineLargeContiguousImpl64(uint64_t state,
-                                                     const unsigned char* first,
-                                                     size_t len) {
+uint64_t MixingHashState::CombineLargeContiguousImpl64(
+    uint64_t state, const unsigned char* first, size_t len) {
   while (len >= PiecewiseChunkSize()) {
-    state =
-        Mix(state, absl::hash_internal::CityHash64(reinterpret_cast<const char*>(first),
-                                         PiecewiseChunkSize()));
+    state = Mix(state, Hash64(first, PiecewiseChunkSize()));
     len -= PiecewiseChunkSize();
     first += PiecewiseChunkSize();
   }
@@ -48,7 +44,25 @@
                                std::integral_constant<int, 8>{});
 }
 
-ABSL_CONST_INIT const void* const CityHashState::kSeed = &kSeed;
+ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
+
+// The salt array used by LowLevelHash. This array is NOT the mechanism used to
+// make absl::Hash non-deterministic between program invocations.  See `Seed()`
+// for that mechanism.
+//
+// Any random values are fine. These values are just digits from the decimal
+// part of pi.
+// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
+constexpr uint64_t kHashSalt[5] = {
+    uint64_t{0x243F6A8885A308D3}, uint64_t{0x13198A2E03707344},
+    uint64_t{0xA4093822299F31D0}, uint64_t{0x082EFA98EC4E6C89},
+    uint64_t{0x452821E638D01377},
+};
+
+uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
+                                           size_t len) {
+  return LowLevelHash(data, len, Seed(), kHashSalt);
+}
 
 }  // namespace hash_internal
 ABSL_NAMESPACE_END
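
As a rough usage sketch (illustrative code, not taken from this patch), the program below shows what the seeding scheme above means in practice: absl::Hash stays deterministic within one process, but because the state is seeded via Seed() and mixed with kHashSalt, the printed value is not a stable fingerprint across runs or binaries. The 64-byte string is only there to push the input onto the large-value path.

#include <cstdio>
#include <string>

#include "absl/hash/hash.h"

int main() {
  // Long enough (> 16 bytes) to take the CityHash/LowLevelHash path.
  std::string s(64, 'a');
  size_t h1 = absl::Hash<std::string>{}(s);
  size_t h2 = absl::Hash<std::string>{}(s);
  // Equal within this run; may differ between runs or binaries by design.
  std::printf("%zu %zu\n", h1, h2);
  return h1 == h2 ? 0 : 1;
}
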
diff --git a/abseil-cpp/absl/hash/internal/hash.h b/abseil-cpp/absl/hash/internal/hash.h
index 9e608f7..ef3f366 100644
--- a/abseil-cpp/absl/hash/internal/hash.h
+++ b/abseil-cpp/absl/hash/internal/hash.h
@@ -21,7 +21,9 @@
 
 #include <algorithm>
 #include <array>
+#include <bitset>
 #include <cmath>
+#include <cstddef>
 #include <cstring>
 #include <deque>
 #include <forward_list>
@@ -35,22 +37,34 @@
 #include <string>
 #include <tuple>
 #include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
 #include <utility>
 #include <vector>
 
-#include "absl/base/internal/endian.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
 #include "absl/base/port.h"
 #include "absl/container/fixed_array.h"
+#include "absl/hash/internal/city.h"
+#include "absl/hash/internal/low_level_hash.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 #include "absl/types/variant.h"
 #include "absl/utility/utility.h"
-#include "absl/hash/internal/city.h"
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
+
+class HashState;
+
 namespace hash_internal {
 
 // Internal detail: Large buffers are hashed in smaller chunks.  This function
@@ -112,24 +126,66 @@
   size_t position_;
 };
 
+// is_hashable()
+//
+// Trait class which returns true if T is hashable by the absl::Hash framework.
+// Used for the AbslHashValue implementations for composite types below.
+template <typename T>
+struct is_hashable;
+
 // HashStateBase
 //
-// A hash state object represents an intermediate state in the computation
-// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style
-// base class for hash state implementations. Developers adding type support
-// for `absl::Hash` should not rely on any parts of the state object other than
-// the following member functions:
+// An internal implementation detail that contains common implementation
+// details for all of the hash state objects generated by Abseil.  This is not
+// a public API; users should not create classes that inherit from this.
+//
+// A hash state object is the template argument `H` passed to `AbslHashValue`.
+// It represents an intermediate state in the computation of an unspecified hash
+// algorithm. `HashStateBase` provides a CRTP style base class for hash state
+// implementations. Developers adding type support for `absl::Hash` should not
+// rely on any parts of the state object other than the following member
+// functions:
 //
 //   * HashStateBase::combine()
 //   * HashStateBase::combine_contiguous()
+//   * HashStateBase::combine_unordered()
 //
-// A derived hash state class of type `H` must provide a static member function
+// A derived hash state class of type `H` must provide a public member function
 // with a signature similar to the following:
 //
 //    `static H combine_contiguous(H state, const unsigned char*, size_t)`.
 //
+// It must also provide a private template method named RunCombineUnordered.
+//
+// A "consumer" is a 1-arg functor returning void.  Its argument is a reference
+// to an inner hash state object, and it may be called multiple times.  When
+// called, the functor consumes the entropy from the provided state object,
+// and resets that object to its empty state.
+//
+// A "combiner" is a stateless 2-arg functor returning void.  Its arguments are
+// an inner hash state object and an ElementStateConsumer functor.  A combiner
+// uses the provided inner hash state object to hash each element of the
+// container, passing the inner hash state object to the consumer after hashing
+// each element.
+//
+// Given these definitions, a derived hash state class of type H
+// must provide a private template method with a signature similar to the
+// following:
+//
+//    `template <typename CombinerT>`
+//    `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
+//
+// This function is responsible for constructing the inner state object and
+// providing a consumer to the combiner.  It uses side effects of the consumer
+// and combiner to mix the state of each element in an order-independent manner,
+// and uses this to return an updated value of `outer_state`.
+//
+// This inside-out approach generates efficient object code in the normal case,
+// but allows us to use stack storage to implement the absl::HashState type
+// erasure mechanism (avoiding heap allocations while hashing).
+//
 // `HashStateBase` will provide a complete implementation for a hash state
-// object in terms of this method.
+// object in terms of these two methods.
 //
 // Example:
 //
@@ -138,6 +194,10 @@
 //       static H combine_contiguous(H state, const unsigned char*, size_t);
 //       using MyHashState::HashStateBase::combine;
 //       using MyHashState::HashStateBase::combine_contiguous;
+//       using MyHashState::HashStateBase::combine_unordered;
+//     private:
+//       template <typename CombinerT>
+//       static H RunCombineUnordered(H state, CombinerT combiner);
 //   };
 template <typename H>
 class HashStateBase {
@@ -178,7 +238,30 @@
   template <typename T>
   static H combine_contiguous(H state, const T* data, size_t size);
 
+  template <typename I>
+  static H combine_unordered(H state, I begin, I end);
+
   using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
+
+  template <typename T>
+  using is_hashable = absl::hash_internal::is_hashable<T>;
+
+ private:
+  // Common implementation of the iteration step of a "combiner", as described
+  // above.
+  template <typename I>
+  struct CombineUnorderedCallback {
+    I begin;
+    I end;
+
+    template <typename InnerH, typename ElementStateConsumer>
+    void operator()(InnerH inner_state, ElementStateConsumer cb) {
+      for (; begin != end; ++begin) {
+        inner_state = H::combine(std::move(inner_state), *begin);
+        cb(inner_state);
+      }
+    }
+  };
 };
 
 // is_uniquely_represented
@@ -343,17 +426,43 @@
   return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
 }
 
+// AbslHashValue() for hashing pointers-to-member
+template <typename H, typename T, typename C>
+H AbslHashValue(H hash_state, T C::*ptr) {
+  auto salient_ptm_size = [](std::size_t n) -> std::size_t {
+#if defined(_MSC_VER)
+    // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
+    // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
+    // padding (namely when they have 1 or 3 ints). The value below is a lower
+    // bound on the number of salient, non-padding bytes that we use for
+    // hashing.
+    if (alignof(T C::*) == alignof(int)) {
+      // No padding when all subobjects have the same size as the total
+      // alignment. This happens in 32-bit mode.
+      return n;
+    } else {
+      // Padding for 1 int (size 16) or 3 ints (size 24).
+      // With 2 ints, the size is 16 with no padding, which we pessimize.
+      return n == 24 ? 20 : n == 16 ? 12 : n;
+    }
+#else
+  // On other platforms, we assume that pointers-to-members do not have
+  // padding.
+#ifdef __cpp_lib_has_unique_object_representations
+    static_assert(std::has_unique_object_representations<T C::*>::value);
+#endif  // __cpp_lib_has_unique_object_representations
+    return n;
+#endif
+  };
+  return H::combine_contiguous(std::move(hash_state),
+                               reinterpret_cast<unsigned char*>(&ptr),
+                               salient_ptm_size(sizeof ptr));
+}
+
 // -----------------------------------------------------------------------------
 // AbslHashValue for Composite Types
 // -----------------------------------------------------------------------------
 
-// is_hashable()
-//
-// Trait class which returns true if T is hashable by the absl::Hash framework.
-// Used for the AbslHashValue implementations for composite types below.
-template <typename T>
-struct is_hashable;
-
 // AbslHashValue() for hashing pairs
 template <typename H, typename T1, typename T2>
 typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
@@ -377,7 +486,7 @@
 // This SFINAE gets MSVC confused under some conditions. Let's just disable it
 // for now.
 H
-#else  // _MSC_VER
+#else   // _MSC_VER
 typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
 #endif  // _MSC_VER
 AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
@@ -411,14 +520,15 @@
 // the same character sequence. These types are:
 //
 //  - `absl::Cord`
-//  - `std::string` (and std::basic_string<char, std::char_traits<char>, A> for
-//      any allocator A)
-//  - `absl::string_view` and `std::string_view`
+//  - `std::string` (and std::basic_string<T, std::char_traits<T>, A> for
+//      any allocator A and any T in {char, wchar_t, char16_t, char32_t})
+//  - `absl::string_view`, `std::string_view`, `std::wstring_view`,
+//    `std::u16string_view`, and `std::u32string_view`.
 //
-// For simplicity, we currently support only `char` strings. This support may
-// be broadened, if necessary, but with some caution - this overload would
-// misbehave in cases where the traits' `eq()` member isn't equivalent to `==`
-// on the underlying character type.
+// For simplicity, we currently support only strings built on `char`, `wchar_t`,
+// `char16_t`, or `char32_t`. This support may be broadened, if necessary, but
+// with some caution - this overload would misbehave in cases where the traits'
+// `eq()` member isn't equivalent to `==` on the underlying character type.
 template <typename H>
 H AbslHashValue(H hash_state, absl::string_view str) {
   return H::combine(
@@ -439,6 +549,21 @@
       str.size());
 }
 
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+
+// Support std::wstring_view, std::u16string_view and std::u32string_view.
+template <typename Char, typename H,
+          typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
+                                       std::is_same<Char, char16_t>::value ||
+                                       std::is_same<Char, char32_t>::value>>
+H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
+  return H::combine(
+      H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
+      str.size());
+}
+
+#endif  // ABSL_HAVE_STD_STRING_VIEW
+
 // -----------------------------------------------------------------------------
 // AbslHashValue for Sequence Containers
 // -----------------------------------------------------------------------------
@@ -487,8 +612,9 @@
 
 // AbslHashValue for hashing std::vector
 //
-// Do not use this for vector<bool>. It does not have a .data(), and a fallback
-// for std::hash<> is most likely faster.
+// Do not use this for vector<bool> on platforms that have a working
+// implementation of std::hash. It does not have a .data(), and a fallback for
+// std::hash<> is most likely faster.
 template <typename H, typename T, typename Allocator>
 typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
                         H>::type
@@ -498,6 +624,44 @@
                     vector.size());
 }
 
+// AbslHashValue special cases for hashing std::vector<bool>
+
+#if defined(ABSL_IS_BIG_ENDIAN) && \
+    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
+
+// std::hash in libstdc++ does not work correctly with vector<bool> on Big
+// Endian platforms therefore we need to implement a custom AbslHashValue for
+// it. More details on the bug:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+                        H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+  typename H::AbslInternalPiecewiseCombiner combiner;
+  for (const auto& i : vector) {
+    unsigned char c = static_cast<unsigned char>(i);
+    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
+  }
+  return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
+}
+#else
+// When not working around the libstdc++ bug above, we still have to contend
+// with the fact that std::hash<vector<bool>> is often poor quality, hashing
+// directly on the internal words and on no other state.  On these platforms,
+// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
+//
+// Mixing in the size (as we do in our other vector<> implementations) on top
+// of the library-provided hash implementation avoids this QOI issue.
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+                        H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+  return H::combine(std::move(hash_state),
+                    std::hash<std::vector<T, Allocator>>{}(vector),
+                    vector.size());
+}
+#endif
+
 // -----------------------------------------------------------------------------
 // AbslHashValue for Ordered Associative Containers
 // -----------------------------------------------------------------------------
@@ -548,6 +712,55 @@
 }
 
 // -----------------------------------------------------------------------------
+// AbslHashValue for Unordered Associative Containers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::unordered_set
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+          typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+    H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
+  return H::combine(
+      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+      s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multiset
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+          typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+    H hash_state,
+    const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
+  return H::combine(
+      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+      s.size());
+}
+
+// AbslHashValue for hashing std::unordered_map
+template <typename H, typename Key, typename T, typename Hash,
+          typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+                        H>::type
+AbslHashValue(H hash_state,
+              const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
+  return H::combine(
+      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+      s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multimap
+template <typename H, typename Key, typename T, typename Hash,
+          typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+                        H>::type
+AbslHashValue(H hash_state,
+              const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
+  return H::combine(
+      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+      s.size());
+}
+
+// -----------------------------------------------------------------------------
 // AbslHashValue for Wrapper Types
 // -----------------------------------------------------------------------------
 
@@ -590,9 +803,28 @@
 // AbslHashValue for Other Types
 // -----------------------------------------------------------------------------
 
-// AbslHashValue for hashing std::bitset is not defined, for the same reason as
-// for vector<bool> (see std::vector above): It does not expose the raw bytes,
-// and a fallback to std::hash<> is most likely faster.
+// AbslHashValue for hashing std::bitset is not defined on Little Endian
+// platforms, for the same reason as for vector<bool> (see std::vector above):
+// It does not expose the raw bytes, and a fallback to std::hash<> is most
+// likely faster.
+
+#if defined(ABSL_IS_BIG_ENDIAN) && \
+    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
+// AbslHashValue for hashing std::bitset
+//
+// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian
+// platforms therefore we need to implement a custom AbslHashValue for it. More
+// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
+template <typename H, size_t N>
+H AbslHashValue(H hash_state, const std::bitset<N>& set) {
+  typename H::AbslInternalPiecewiseCombiner combiner;
+  for (size_t i = 0; i < N; ++i) {
+    unsigned char c = static_cast<unsigned char>(set[i]);
+    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
+  }
+  return H::combine(combiner.finalize(std::move(hash_state)), N);
+}
+#endif
 
 // -----------------------------------------------------------------------------
 
@@ -712,9 +944,8 @@
 struct is_hashable
     : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
 
-// CityHashState
-class ABSL_DLL CityHashState
-    : public HashStateBase<CityHashState> {
+// MixingHashState
+class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
   // absl::uint128 is not an alias or a thin wrapper around the intrinsic.
   // We use the intrinsic when available to improve performance.
 #ifdef ABSL_HAVE_INTRINSIC_INT128
@@ -724,8 +955,8 @@
 #endif  // ABSL_HAVE_INTRINSIC_INT128
 
   static constexpr uint64_t kMul =
-      sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
-                          : uint64_t{0x9ddfea08eb382d69};
+  sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
+                      : uint64_t{0x9ddfea08eb382d69};
 
   template <typename T>
   using IntegralFastPath =
@@ -733,23 +964,23 @@
 
  public:
   // Move only
-  CityHashState(CityHashState&&) = default;
-  CityHashState& operator=(CityHashState&&) = default;
+  MixingHashState(MixingHashState&&) = default;
+  MixingHashState& operator=(MixingHashState&&) = default;
 
-  // CityHashState::combine_contiguous()
+  // MixingHashState::combine_contiguous()
   //
   // Fundamental base case for hash recursion: mixes the given range of bytes
   // into the hash state.
-  static CityHashState combine_contiguous(CityHashState hash_state,
-                                          const unsigned char* first,
-                                          size_t size) {
-    return CityHashState(
+  static MixingHashState combine_contiguous(MixingHashState hash_state,
+                                            const unsigned char* first,
+                                            size_t size) {
+    return MixingHashState(
         CombineContiguousImpl(hash_state.state_, first, size,
                               std::integral_constant<int, sizeof(size_t)>{}));
   }
-  using CityHashState::HashStateBase::combine_contiguous;
+  using MixingHashState::HashStateBase::combine_contiguous;
 
-  // CityHashState::hash()
+  // MixingHashState::hash()
   //
   // For performance reasons in non-opt mode, we specialize this for
   // integral types.
@@ -758,27 +989,53 @@
   // The result should be the same as running the whole algorithm, but faster.
   template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
   static size_t hash(T value) {
-    return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
+    return static_cast<size_t>(
+        Mix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));
   }
 
-  // Overload of CityHashState::hash()
+  // Overload of MixingHashState::hash()
   template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
   static size_t hash(const T& value) {
-    return static_cast<size_t>(combine(CityHashState{}, value).state_);
+    return static_cast<size_t>(combine(MixingHashState{}, value).state_);
   }
 
  private:
   // Invoked only once for a given argument; that plus the fact that this is
   // move-only ensures that there is only one non-moved-from object.
-  CityHashState() : state_(Seed()) {}
+  MixingHashState() : state_(Seed()) {}
+
+  friend class MixingHashState::HashStateBase;
+
+  template <typename CombinerT>
+  static MixingHashState RunCombineUnordered(MixingHashState state,
+                                             CombinerT combiner) {
+    uint64_t unordered_state = 0;
+    combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
+      // Add the hash state of the element to the running total, but mix the
+      // carry bit back into the low bit.  This is intended to avoid losing
+      // entropy to overflow, especially when unordered_multisets contain
+      // multiple copies of the same value.
+      auto element_state = inner_state.state_;
+      unordered_state += element_state;
+      if (unordered_state < element_state) {
+        ++unordered_state;
+      }
+      inner_state = MixingHashState{};
+    });
+    return MixingHashState::combine(std::move(state), unordered_state);
+  }
+
+  // Allow the HashState type-erasure implementation to invoke
+  // RunCombineUnordered() directly.
+  friend class absl::HashState;
 
   // Workaround for MSVC bug.
   // We make the type copyable to fix the calling convention, even though we
   // never actually copy it. Keep it private to not affect the public API of the
   // type.
-  CityHashState(const CityHashState&) = default;
+  MixingHashState(const MixingHashState&) = default;
 
-  explicit CityHashState(uint64_t state) : state_(state) {}
+  explicit MixingHashState(uint64_t state) : state_(state) {}
 
   // Implementation of the base case for combine_contiguous where we actually
   // mix the bytes into the state.
@@ -791,7 +1048,7 @@
   static uint64_t CombineContiguousImpl(uint64_t state,
                                         const unsigned char* first, size_t len,
                                         std::integral_constant<int, 8>
-                                        /* sizeof_size_t*/);
+                                        /* sizeof_size_t */);
 
   // Slow dispatch path for calls to CombineContiguousImpl with a size argument
   // larger than PiecewiseChunkSize().  Has the same effect as calling
@@ -804,29 +1061,60 @@
                                                size_t len);
 
   // Reads 9 to 16 bytes from p.
-  // The first 8 bytes are in .first, the rest (zero padded) bytes are in
-  // .second.
+  // The least significant 8 bytes are in .first; the last 8 bytes (which may
+  // overlap .first when len < 16) are in .second.
   static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
                                                  size_t len) {
-    uint64_t high = little_endian::Load64(p + len - 8);
-    return {little_endian::Load64(p), high >> (128 - len * 8)};
+    uint64_t low_mem = absl::base_internal::UnalignedLoad64(p);
+    uint64_t high_mem = absl::base_internal::UnalignedLoad64(p + len - 8);
+#ifdef ABSL_IS_LITTLE_ENDIAN
+    uint64_t most_significant = high_mem;
+    uint64_t least_significant = low_mem;
+#else
+    uint64_t most_significant = low_mem;
+    uint64_t least_significant = high_mem;
+#endif
+    return {least_significant, most_significant};
   }
 
   // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
   static uint64_t Read4To8(const unsigned char* p, size_t len) {
-    return (static_cast<uint64_t>(little_endian::Load32(p + len - 4))
-            << (len - 4) * 8) |
-           little_endian::Load32(p);
+    uint32_t low_mem = absl::base_internal::UnalignedLoad32(p);
+    uint32_t high_mem = absl::base_internal::UnalignedLoad32(p + len - 4);
+#ifdef ABSL_IS_LITTLE_ENDIAN
+    uint32_t most_significant = high_mem;
+    uint32_t least_significant = low_mem;
+#else
+    uint32_t most_significant = low_mem;
+    uint32_t least_significant = high_mem;
+#endif
+    return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |
+           least_significant;
   }
 
   // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
   static uint32_t Read1To3(const unsigned char* p, size_t len) {
-    return static_cast<uint32_t>((p[0]) |                         //
-                                 (p[len / 2] << (len / 2 * 8)) |  //
-                                 (p[len - 1] << ((len - 1) * 8)));
+    // The trick used by this implementation is to avoid branches if possible.
+    unsigned char mem0 = p[0];
+    unsigned char mem1 = p[len / 2];
+    unsigned char mem2 = p[len - 1];
+#ifdef ABSL_IS_LITTLE_ENDIAN
+    unsigned char significant2 = mem2;
+    unsigned char significant1 = mem1;
+    unsigned char significant0 = mem0;
+#else
+    unsigned char significant2 = mem0;
+    unsigned char significant1 = len == 2 ? mem0 : mem1;
+    unsigned char significant0 = mem2;
+#endif
+    return static_cast<uint32_t>(significant0 |                     //
+                                 (significant1 << (len / 2 * 8)) |  //
+                                 (significant2 << ((len - 1) * 8)));
   }
 
   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
+    // Though the 128-bit product on AArch64 needs two instructions, it is
+    // still a good balance between speed and hash quality.
     using MultType =
         absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
     // We do the addition in 64-bit space to make sure the 128-bit
@@ -838,6 +1126,19 @@
     return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
   }
 
+  // Defined out of line to avoid bloat from a direct call to LowLevelHash()
+  // with fixed values for both the seed and salt parameters.
+  static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);
+
+  ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
+                                                      size_t len) {
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+    return LowLevelHashImpl(data, len);
+#else
+    return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
+#endif
+  }
+
   // Seed()
   //
   // A non-deterministic seed.
@@ -855,15 +1156,23 @@
   // On other platforms this is still going to be non-deterministic but most
   // probably per-build and not per-process.
   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
+#if (!defined(__clang__) || __clang_major__ > 11) && \
+    (!defined(__apple_build_version__) ||            \
+     __apple_build_version__ >= 19558921)  // Xcode 12
+    return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
+#else
+    // Workaround the absence of
+    // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
     return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
+#endif
   }
   static const void* const kSeed;
 
   uint64_t state_;
 };
 
-// CityHashState::CombineContiguousImpl()
-inline uint64_t CityHashState::CombineContiguousImpl(
+// MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 4> /* sizeof_size_t */) {
   // For large values we use CityHash, for small ones we just use a
@@ -873,7 +1182,7 @@
     if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
       return CombineLargeContiguousImpl32(state, first, len);
     }
-    v = absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
+    v = hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
   } else if (len >= 4) {
     v = Read4To8(first, len);
   } else if (len > 0) {
@@ -885,22 +1194,35 @@
   return Mix(state, v);
 }
 
-// Overload of CityHashState::CombineContiguousImpl()
-inline uint64_t CityHashState::CombineContiguousImpl(
+// Overload of MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 8> /* sizeof_size_t */) {
-  // For large values we use CityHash, for small ones we just use a
-  // multiplicative hash.
+  // For large values we use LowLevelHash or CityHash depending on the platform,
+  // for small ones we just use a multiplicative hash.
   uint64_t v;
   if (len > 16) {
     if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
       return CombineLargeContiguousImpl64(state, first, len);
     }
-    v = absl::hash_internal::CityHash64(reinterpret_cast<const char*>(first), len);
+    v = Hash64(first, len);
   } else if (len > 8) {
+    // This hash function was constructed via ML-driven algorithm discovery
+    // using reinforcement learning. We fed the agent lots of inputs from
+    // microbenchmarks and SMHasher, plus low-Hamming-distance variants of
+    // generated inputs, and picked the candidate that performed well on both
+    // micro- and macrobenchmarks.
     auto p = Read9To16(first, len);
-    state = Mix(state, p.first);
-    v = p.second;
+    uint64_t lo = p.first;
+    uint64_t hi = p.second;
+    // Rotation by 53 was found to be most often useful when discovering these
+    // hashing algorithms with ML techniques.
+    lo = absl::rotr(lo, 53);
+    state += kMul;
+    lo += state;
+    state ^= hi;
+    uint128 m = state;
+    m *= lo;
+    return static_cast<uint64_t>(m ^ (m >> 64));
   } else if (len >= 4) {
     v = Read4To8(first, len);
   } else if (len > 0) {
@@ -927,7 +1249,9 @@
 
 template <typename T>
 struct HashImpl {
-  size_t operator()(const T& value) const { return CityHashState::hash(value); }
+  size_t operator()(const T& value) const {
+    return MixingHashState::hash(value);
+  }
 };
 
 template <typename T>
@@ -949,6 +1273,14 @@
   return hash_internal::hash_range_or_bytes(std::move(state), data, size);
 }
 
+// HashStateBase::combine_unordered()
+template <typename H>
+template <typename I>
+H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
+  return H::RunCombineUnordered(std::move(state),
+                                CombineUnorderedCallback<I>{begin, end});
+}
+
 // HashStateBase::PiecewiseCombiner::add_buffer()
 template <typename H>
 H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
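
Two of the hash.h additions above in one short, illustrative sketch (only public absl::Hash is used; the main() wrapper and names are this example's, not the library's): equal unordered containers hash equally regardless of iteration order thanks to combine_unordered(), and wide string views become hashable when std::string_view support (ABSL_HAVE_STD_STRING_VIEW, e.g. under C++17) is available.

#include <string>
#include <string_view>
#include <unordered_map>

#include "absl/hash/hash.h"

int main() {
  // Order-independent hashing: equal maps hash equally even if their buckets
  // iterate in different orders.
  std::unordered_map<std::string, int> a = {{"x", 1}, {"y", 2}};
  std::unordered_map<std::string, int> b = {{"y", 2}, {"x", 1}};
  using MapHash = absl::Hash<std::unordered_map<std::string, int>>;
  const bool same = MapHash{}(a) == MapHash{}(b);

  // Wide string views are hashable via the new basic_string_view overload.
  std::u16string_view wide = u"wide";
  const size_t wide_hash = absl::Hash<std::u16string_view>{}(wide);
  (void)wide_hash;

  return same ? 0 : 1;
}
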
diff --git a/abseil-cpp/absl/hash/internal/hash_test.h b/abseil-cpp/absl/hash/internal/hash_test.h
new file mode 100644
index 0000000..9963dc0
--- /dev/null
+++ b/abseil-cpp/absl/hash/internal/hash_test.h
@@ -0,0 +1,87 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Common code shared between absl/hash/hash_test.cc and
+// absl/hash/hash_instantiated_test.cc.
+
+#ifndef ABSL_HASH_INTERNAL_HASH_TEST_H_
+#define ABSL_HASH_INTERNAL_HASH_TEST_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_test_internal {
+
+// Utility wrapper of T for the purposes of testing the `AbslHash` type erasure
+// mechanism.  `TypeErasedValue<T>` can be constructed with a `T`, and can
+// be compared and hashed.  However, all hashing goes through the hashing
+// type-erasure framework.
+template <typename T>
+class TypeErasedValue {
+ public:
+  TypeErasedValue() = default;
+  TypeErasedValue(const TypeErasedValue&) = default;
+  TypeErasedValue(TypeErasedValue&&) = default;
+  explicit TypeErasedValue(const T& n) : n_(n) {}
+
+  template <typename H>
+  friend H AbslHashValue(H hash_state, const TypeErasedValue& v) {
+    v.HashValue(absl::HashState::Create(&hash_state));
+    return hash_state;
+  }
+
+  void HashValue(absl::HashState state) const {
+    absl::HashState::combine(std::move(state), n_);
+  }
+
+  bool operator==(const TypeErasedValue& rhs) const { return n_ == rhs.n_; }
+  bool operator!=(const TypeErasedValue& rhs) const { return !(*this == rhs); }
+
+ private:
+  T n_;
+};
+
+// A TypeErasedValue refinement, for containers.  It exposes the wrapped
+// `value_type` and is constructible from an initializer list.
+template <typename T>
+class TypeErasedContainer : public TypeErasedValue<T> {
+ public:
+  using value_type = typename T::value_type;
+  TypeErasedContainer() = default;
+  TypeErasedContainer(const TypeErasedContainer&) = default;
+  TypeErasedContainer(TypeErasedContainer&&) = default;
+  explicit TypeErasedContainer(const T& n) : TypeErasedValue<T>(n) {}
+  TypeErasedContainer(std::initializer_list<value_type> init_list)
+      : TypeErasedContainer(T(init_list.begin(), init_list.end())) {}
+  // One-argument constructor from the value type T, to appease older
+  // toolchains that get confused by one-element initializer lists in some
+  // contexts.
+  explicit TypeErasedContainer(const value_type& v)
+      : TypeErasedContainer(T(&v, &v + 1)) {}
+};
+
+// Helper trait to verify if T is hashable. We use absl::Hash's poison status to
+// detect it.
+template <typename T>
+using is_hashable = std::is_default_constructible<absl::Hash<T>>;
+
+}  // namespace hash_test_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_HASH_INTERNAL_HASH_TEST_H_
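
A minimal, hypothetical test (names invented for illustration) showing how TypeErasedValue<T> is meant to be exercised: hashing the wrapper routes through the absl::HashState type-erasure path, and equal wrapped values still hash equally within one process.

#include "gtest/gtest.h"
#include "absl/hash/hash.h"
#include "absl/hash/internal/hash_test.h"

namespace {

using absl::hash_test_internal::TypeErasedValue;

TEST(TypeErasedValueSketch, EqualValuesHashEqual) {
  TypeErasedValue<int> a(42);
  TypeErasedValue<int> b(42);
  EXPECT_EQ(a, b);
  // Both hashes go through the absl::HashState::Create()/combine() path above.
  EXPECT_EQ(absl::Hash<TypeErasedValue<int>>{}(a),
            absl::Hash<TypeErasedValue<int>>{}(b));
}

}  // namespace
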
diff --git a/abseil-cpp/absl/hash/internal/low_level_hash.cc b/abseil-cpp/absl/hash/internal/low_level_hash.cc
new file mode 100644
index 0000000..b5db0b8
--- /dev/null
+++ b/abseil-cpp/absl/hash/internal/low_level_hash.cc
@@ -0,0 +1,118 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/hash/internal/low_level_hash.h"
+
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/prefetch.h"
+#include "absl/numeric/int128.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+static uint64_t Mix(uint64_t v0, uint64_t v1) {
+  absl::uint128 p = v0;
+  p *= v1;
+  return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
+}
+
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+                      const uint64_t salt[5]) {
+  // Prefetch the cacheline that data resides in.
+  PrefetchToLocalCache(data);
+  const uint8_t* ptr = static_cast<const uint8_t*>(data);
+  uint64_t starting_length = static_cast<uint64_t>(len);
+  uint64_t current_state = seed ^ salt[0];
+
+  if (len > 64) {
+    // If we have more than 64 bytes, we're going to handle chunks of 64
+    // bytes at a time. We're going to build up two separate hash states
+    // which we will then hash together.
+    uint64_t duplicated_state = current_state;
+
+    do {
+      // Always prefetch the next cacheline.
+      PrefetchToLocalCache(ptr + ABSL_CACHELINE_SIZE);
+
+      uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+      uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+      uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+      uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
+      uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32);
+      uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40);
+      uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
+      uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
+
+      uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+      uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+      current_state = (cs0 ^ cs1);
+
+      uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
+      uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
+      duplicated_state = (ds0 ^ ds1);
+
+      ptr += 64;
+      len -= 64;
+    } while (len > 64);
+
+    current_state = current_state ^ duplicated_state;
+  }
+
+  // At this point `ptr` refers to at most 64 remaining bytes, and the current
+  // state of the hashing state machine is stored in current_state.
+  while (len > 16) {
+    uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+    uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+
+    current_state = Mix(a ^ salt[1], b ^ current_state);
+
+    ptr += 16;
+    len -= 16;
+  }
+
+  // At this point `ptr` refers to at most 16 remaining bytes.
+  uint64_t a = 0;
+  uint64_t b = 0;
+  if (len > 8) {
+    // When we have at least 9 and at most 16 bytes, set A to the first 64
+    // bits of the input and B to the last 64 bits of the input. Yes, they will
+    // overlap in the middle if we are working with less than the full 16
+    // bytes.
+    a = absl::base_internal::UnalignedLoad64(ptr);
+    b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
+  } else if (len > 3) {
+    // If we have at least 4 and at most 8 bytes, set A to the first 32
+    // bits and B to the last 32 bits.
+    a = absl::base_internal::UnalignedLoad32(ptr);
+    b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
+  } else if (len > 0) {
+    // If we have at least 1 and at most 3 bytes, read all of the provided
+    // bits into A, with some adjustments.
+    a = static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
+                              ptr[len - 1]);
+    b = 0;
+  } else {
+    a = 0;
+    b = 0;
+  }
+
+  uint64_t w = Mix(a ^ salt[1], b ^ current_state);
+  uint64_t z = salt[1] ^ starting_length;
+  return Mix(w, z);
+}
+
+}  // namespace hash_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
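
As an aside (a hand-written illustration, not code from the patch), the 9-to-16-byte tail above avoids a zero-padded, length-dependent read by issuing two overlapping 8-byte loads; the helper below, with invented names, spells out which bytes land where before they are combined with Mix().

#include <cstddef>
#include <cstdint>
#include <cstring>

// For 9 <= len <= 16: `a` receives bytes [0, 8) and `b` receives bytes
// [len - 8, len), so when len < 16 the two loads overlap in the middle
// instead of branching on the exact length.
void TailLoads(const unsigned char* ptr, size_t len, uint64_t* a, uint64_t* b) {
  std::memcpy(a, ptr, sizeof(*a));
  std::memcpy(b, ptr + len - sizeof(*b), sizeof(*b));
}
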
diff --git a/abseil-cpp/absl/hash/internal/low_level_hash.h b/abseil-cpp/absl/hash/internal/low_level_hash.h
new file mode 100644
index 0000000..439968a
--- /dev/null
+++ b/abseil-cpp/absl/hash/internal/low_level_hash.h
@@ -0,0 +1,50 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides the Google-internal implementation of LowLevelHash.
+//
+// LowLevelHash is a fast hash function for hash tables, the fastest we've
+// currently (late 2020) found that passes the SMHasher tests. The algorithm
+// relies on intrinsic 128-bit multiplication for speed. This is not meant to be
+// secure - just fast.
+//
+// It is closely based on a version of wyhash, but does not maintain or
+// guarantee future compatibility with it.
+
+#ifndef ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
+#define ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// Hash function for a byte array. A 64-bit seed and a set of five 64-bit
+// integers are hashed into the result.
+//
+// To allow all hashable types (including string_view and Span) to depend on
+// this algorithm, we keep the API low-level, with as few dependencies as
+// possible.
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+                      const uint64_t salt[5]);
+
+}  // namespace hash_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
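
To make the declaration above concrete, here is a rough sketch of calling the internal entry point directly (ordinary code should go through absl::Hash; this API carries no stability guarantee). The salt words mirror the digits-of-pi values used by kHashSalt earlier in this change, but any five 64-bit words work, and the seed of 0 is arbitrary.

#include <cstdint>
#include <cstdio>
#include <string>

#include "absl/hash/internal/low_level_hash.h"

int main() {
  constexpr uint64_t kExampleSalt[5] = {
      uint64_t{0x243F6A8885A308D3}, uint64_t{0x13198A2E03707344},
      uint64_t{0xA4093822299F31D0}, uint64_t{0x082EFA98EC4E6C89},
      uint64_t{0x452821E638D01377}};
  std::string data(100, 'x');
  uint64_t h = absl::hash_internal::LowLevelHash(data.data(), data.size(),
                                                 /*seed=*/0, kExampleSalt);
  std::printf("%016llx\n", static_cast<unsigned long long>(h));
  return 0;
}
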
diff --git a/abseil-cpp/absl/hash/internal/low_level_hash_test.cc b/abseil-cpp/absl/hash/internal/low_level_hash_test.cc
new file mode 100644
index 0000000..589a3d8
--- /dev/null
+++ b/abseil-cpp/absl/hash/internal/low_level_hash_test.cc
@@ -0,0 +1,532 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/hash/internal/low_level_hash.h"
+
+#include <cinttypes>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/escaping.h"
+
+#define UPDATE_GOLDEN 0
+
+namespace {
+
+static const uint64_t kSalt[5] = {0xa0761d6478bd642f, 0xe7037ed1a0b428dbl,
+                                  0x8ebc6af09c88c6e3, 0x589965cc75374cc3l,
+                                  0x1d8e4e27c47d124f};
+
+TEST(LowLevelHashTest, VerifyGolden) {
+  constexpr size_t kNumGoldenOutputs = 134;
+  static struct {
+    absl::string_view base64_data;
+    uint64_t seed;
+  } cases[] = {
+      {"", uint64_t{0xec42b7ab404b8acb}},
+      {"ICAg", uint64_t{0}},
+      {"YWFhYQ==", uint64_t{0}},
+      {"AQID", uint64_t{0}},
+      {"AQIDBA==", uint64_t{0}},
+      {"dGhpcmRfcGFydHl8d3loYXNofDY0", uint64_t{0}},
+      {"Zw==", uint64_t{0xeeee074043a3ee0f}},
+      {"xmk=", uint64_t{0x857902089c393de}},
+      {"c1H/", uint64_t{0x993df040024ca3af}},
+      {"SuwpzQ==", uint64_t{0xc4e4c2acea740e96}},
+      {"uqvy++M=", uint64_t{0x6a214b3db872d0cf}},
+      {"RnzCVPgb", uint64_t{0x44343db6a89dba4d}},
+      {"6OeNdlouYw==", uint64_t{0x77b5d6d1ae1dd483}},
+      {"M5/JmmYyDbc=", uint64_t{0x89ab8ecb44d221f1}},
+      {"MVijWiVdBRdY", uint64_t{0x60244b17577ca81b}},
+      {"6V7Uq7LNxpu0VA==", uint64_t{0x59a08dcee0717067}},
+      {"EQ6CdEEhPdyHcOk=", uint64_t{0xf5f20db3ade57396}},
+      {"PqFB4fxnPgF+l+rc", uint64_t{0xbf8dee0751ad3efb}},
+      {"a5aPOFwq7LA7+zKvPA==", uint64_t{0x6b7a06b268d63e30}},
+      {"VOwY21wCGv5D+/qqOvs=", uint64_t{0xb8c37f0ae0f54c82}},
+      {"KdHmBTx8lHXYvmGJ+Vy7", uint64_t{0x9fcbed0c38e50eef}},
+      {"qJkPlbHr8bMF7/cA6aE65Q==", uint64_t{0x2af4bade1d8e3a1d}},
+      {"ygvL0EhHZL0fIx6oHHtkxRQ=", uint64_t{0x714e3aa912da2f2c}},
+      {"c1rFXkt5YztwZCQRngncqtSs", uint64_t{0xf5ee75e3cbb82c1c}},
+      {"8hsQrzszzeNQSEcVXLtvIhm6mw==", uint64_t{0x620e7007321b93b9}},
+      {"ffUL4RocfyP4KfikGxO1yk7omDI=", uint64_t{0xc08528cac2e551fc}},
+      {"OOB5TT00vF9Od/rLbAWshiErqhpV", uint64_t{0x6a1debf9cc3ad39}},
+      {"or5wtXM7BFzTNpSzr+Lw5J5PMhVJ/Q==", uint64_t{0x7e0a3c88111fc226}},
+      {"gk6pCHDUsoopVEiaCrzVDhioRKxb844=", uint64_t{0x1301fef15df39edb}},
+      {"TNctmwlC5QbEM6/No4R/La3UdkfeMhzs", uint64_t{0x64e181f3d5817ab}},
+      {"SsQw9iAjhWz7sgcE9OwLuSC6hsM+BfHs2Q==", uint64_t{0xafafc44961078ecb}},
+      {"ZzO3mVCj4xTT2TT3XqDyEKj2BZQBvrS8RHg=", uint64_t{0x4f7bb45549250094}},
+      {"+klp5iPQGtppan5MflEls0iEUzqU+zGZkDJX", uint64_t{0xa30061abaa2818c}},
+      {"RO6bvOnlJc8I9eniXlNgqtKy0IX6VNg16NRmgg==",
+       uint64_t{0xd902ee3e44a5705f}},
+      {"ZJjZqId1ZXBaij9igClE3nyliU5XWdNRrayGlYA=", uint64_t{0x316d36da516f583}},
+      {"7BfkhfGMDGbxfMB8uyL85GbaYQtjr2K8g7RpLzr/",
+       uint64_t{0x402d83f9f834f616}},
+      {"rycWk6wHH7htETQtje9PidS2YzXBx+Qkg2fY7ZYS7A==",
+       uint64_t{0x9c604164c016b72c}},
+      {"RTkC2OUK+J13CdGllsH0H5WqgspsSa6QzRZouqx6pvI=",
+       uint64_t{0x3f4507e01f9e73ba}},
+      {"tKjKmbLCNyrLCM9hycOAXm4DKNpM12oZ7dLTmUx5iwAi",
+       uint64_t{0xc3fe0d5be8d2c7c7}},
+      {"VprUGNH+5NnNRaORxgH/ySrZFQFDL+4VAodhfBNinmn8cg==",
+       uint64_t{0x531858a40bfa7ea1}},
+      {"gc1xZaY+q0nPcUvOOnWnT3bqfmT/geth/f7Dm2e/DemMfk4=",
+       uint64_t{0x86689478a7a7e8fa}},
+      {"Mr35fIxqx1ukPAL0su1yFuzzAU3wABCLZ8+ZUFsXn47UmAph",
+       uint64_t{0x4ec948b8e7f27288}},
+      {"A9G8pw2+m7+rDtWYAdbl8tb2fT7FFo4hLi2vAsa5Y8mKH3CX3g==",
+       uint64_t{0xce46c7213c10032}},
+      {"DFaJGishGwEHDdj9ixbCoaTjz9KS0phLNWHVVdFsM93CvPft3hM=",
+       uint64_t{0xf63e96ee6f32a8b6}},
+      {"7+Ugx+Kr3aRNgYgcUxru62YkTDt5Hqis+2po81hGBkcrJg4N0uuy",
+       uint64_t{0x1cfe85e65fc5225}},
+      {"H2w6O8BUKqu6Tvj2xxaecxEI2wRgIgqnTTG1WwOgDSINR13Nm4d4Vg==",
+       uint64_t{0x45c474f1cee1d2e8}},
+      {"1XBMnIbqD5jy65xTDaf6WtiwtdtQwv1dCVoqpeKj+7cTR1SaMWMyI04=",
+       uint64_t{0x6e024e14015f329c}},
+      {"znZbdXG2TSFrKHEuJc83gPncYpzXGbAebUpP0XxzH0rpe8BaMQ17nDbt",
+       uint64_t{0x760c40502103ae1c}},
+      {"ylu8Atu13j1StlcC1MRMJJXIl7USgDDS22HgVv0WQ8hx/8pNtaiKB17hCQ==",
+       uint64_t{0x17fd05c3c560c320}},
+      {"M6ZVVzsd7vAvbiACSYHioH/440dp4xG2mLlBnxgiqEvI/aIEGpD0Sf4VS0g=",
+       uint64_t{0x8b34200a6f8e90d9}},
+      {"li3oFSXLXI+ubUVGJ4blP6mNinGKLHWkvGruun85AhVn6iuMtocbZPVhqxzn",
+       uint64_t{0x6be89e50818bdf69}},
+      {"kFuQHuUCqBF3Tc3hO4dgdIp223ShaCoog48d5Do5zMqUXOh5XpGK1t5XtxnfGA==",
+       uint64_t{0xfb389773315b47d8}},
+      {"jWmOad0v0QhXVJd1OdGuBZtDYYS8wBVHlvOeTQx9ZZnm8wLEItPMeihj72E0nWY=",
+       uint64_t{0x4f2512a23f61efee}},
+      {"z+DHU52HaOQdW4JrZwDQAebEA6rm13Zg/9lPYA3txt3NjTBqFZlOMvTRnVzRbl23",
+       uint64_t{0x59ccd92fc16c6fda}},
+      {"MmBiGDfYeTayyJa/tVycg+rN7f9mPDFaDc+23j0TlW9094er0ADigsl4QX7V3gG/qw==",
+       uint64_t{0x25c5a7f5bd330919}},
+      {"774RK+9rOL4iFvs1q2qpo/JVc/I39buvNjqEFDtDvyoB0FXxPI2vXqOrk08VPfIHkmU=",
+       uint64_t{0x51df4174d34c97d7}},
+      {"+slatXiQ7/2lK0BkVUI1qzNxOOLP3I1iK6OfHaoxgqT63FpzbElwEXSwdsryq3UlHK0I",
+       uint64_t{0x80ce6d76f89cb57}},
+      {"64mVTbQ47dHjHlOHGS/hjJwr/"
+       "K2frCNpn87exOqMzNUVYiPKmhCbfS7vBUce5tO6Ec9osQ==",
+       uint64_t{0x20961c911965f684}},
+      {"fIsaG1r530SFrBqaDj1kqE0AJnvvK8MNEZbII2Yw1OK77v0V59xabIh0B5axaz/"
+       "+a2V5WpA=",
+       uint64_t{0x4e5b926ec83868e7}},
+      {"PGih0zDEOWCYGxuHGDFu9Ivbff/"
+       "iE7BNUq65tycTR2R76TerrXALRosnzaNYO5fjFhTi+CiS",
+       uint64_t{0x3927b30b922eecef}},
+      {"RnpA/"
+       "zJnEnnLjmICORByRVb9bCOgxF44p3VMiW10G7PvW7IhwsWajlP9kIwNA9FjAD2GoQHk2Q="
+       "=",
+       uint64_t{0xbd0291284a49b61c}},
+      {"qFklMceaTHqJpy2qavJE+EVBiNFOi6OxjOA3LeIcBop1K7w8xQi3TrDk+"
+       "BrWPRIbfprszSaPfrI=",
+       uint64_t{0x73a77c575bcc956}},
+      {"cLbfUtLl3EcQmITWoTskUR8da/VafRDYF/ylPYwk7/"
+       "zazk6ssyrzxMN3mmSyvrXR2yDGNZ3WDrTT",
+       uint64_t{0x766a0e2ade6d09a6}},
+      {"s/"
+       "Jf1+"
+       "FbsbCpXWPTUSeWyMH6e4CvTFvPE5Fs6Z8hvFITGyr0dtukHzkI84oviVLxhM1xMxrMAy1db"
+       "w==",
+       uint64_t{0x2599f4f905115869}},
+      {"FvyQ00+j7nmYZVQ8hI1Edxd0AWplhTfWuFGiu34AK5X8u2hLX1bE97sZM0CmeLe+"
+       "7LgoUT1fJ/axybE=",
+       uint64_t{0xd8256e5444d21e53}},
+      {"L8ncxMaYLBH3g9buPu8hfpWZNlOF7nvWLNv9IozH07uQsIBWSKxoPy8+"
+       "LW4tTuzC6CIWbRGRRD1sQV/4",
+       uint64_t{0xf664a91333fb8dfd}},
+      {"CDK0meI07yrgV2kQlZZ+"
+       "wuVqhc2NmzqeLH7bmcA6kchsRWFPeVF5Wqjjaj556ABeUoUr3yBmfU3kWOakkg==",
+       uint64_t{0x9625b859be372cd1}},
+      {"d23/vc5ONh/"
+       "HkMiq+gYk4gaCNYyuFKwUkvn46t+dfVcKfBTYykr4kdvAPNXGYLjM4u1YkAEFpJP+"
+       "nX7eOvs=",
+       uint64_t{0x7b99940782e29898}},
+      {"NUR3SRxBkxTSbtQORJpu/GdR6b/h6sSGfsMj/KFd99ahbh+9r7LSgSGmkGVB/"
+       "mGoT0pnMTQst7Lv2q6QN6Vm",
+       uint64_t{0x4fe12fa5383b51a8}},
+      {"2BOFlcI3Z0RYDtS9T9Ie9yJoXlOdigpPeeT+CRujb/"
+       "O39Ih5LPC9hP6RQk1kYESGyaLZZi3jtabHs7DiVx/VDg==",
+       uint64_t{0xe2ccb09ac0f5b4b6}},
+      {"FF2HQE1FxEvWBpg6Z9zAMH+Zlqx8S1JD/"
+       "wIlViL6ZDZY63alMDrxB0GJQahmAtjlm26RGLnjW7jmgQ4Ie3I+014=",
+       uint64_t{0x7d0a37adbd7b753b}},
+      {"tHmO7mqVL/PX11nZrz50Hc+M17Poj5lpnqHkEN+4bpMx/"
+       "YGbkrGOaYjoQjgmt1X2QyypK7xClFrjeWrCMdlVYtbW",
+       uint64_t{0xd3ae96ef9f7185f2}},
+      {"/WiHi9IQcxRImsudkA/KOTqGe8/"
+       "gXkhKIHkjddv5S9hi02M049dIK3EUyAEjkjpdGLUs+BN0QzPtZqjIYPOgwsYE9g==",
+       uint64_t{0x4fb88ea63f79a0d8}},
+      {"qds+1ExSnU11L4fTSDz/QE90g4Jh6ioqSh3KDOTOAo2pQGL1k/"
+       "9CCC7J23YF27dUTzrWsCQA2m4epXoCc3yPHb3xElA=",
+       uint64_t{0xed564e259bb5ebe9}},
+      {"8FVYHx40lSQPTHheh08Oq0/"
+       "pGm2OlG8BEf8ezvAxHuGGdgCkqpXIueJBF2mQJhTfDy5NncO8ntS7vaKs7sCNdDaNGOEi",
+       uint64_t{0x3e3256b60c428000}},
+      {"4ZoEIrJtstiCkeew3oRzmyJHVt/pAs2pj0HgHFrBPztbQ10NsQ/"
+       "lM6DM439QVxpznnBSiHMgMQJhER+70l72LqFTO1JiIQ==",
+       uint64_t{0xfb05bad59ec8705}},
+      {"hQPtaYI+wJyxXgwD5n8jGIKFKaFA/"
+       "P83KqCKZfPthnjwdOFysqEOYwAaZuaaiv4cDyi9TyS8hk5cEbNP/jrI7q6pYGBLbsM=",
+       uint64_t{0xafdc251dbf97b5f8}},
+      {"S4gpMSKzMD7CWPsSfLeYyhSpfWOntyuVZdX1xSBjiGvsspwOZcxNKCRIOqAA0moUfOh3I5+"
+       "juQV4rsqYElMD/gWfDGpsWZKQ",
+       uint64_t{0x10ec9c92ddb5dcbc}},
+      {"oswxop+"
+       "bthuDLT4j0PcoSKby4LhF47ZKg8K17xxHf74UsGCzTBbOz0MM8hQEGlyqDT1iUiAYnaPaUp"
+       "L2mRK0rcIUYA4qLt5uOw==",
+       uint64_t{0x9a767d5822c7dac4}},
+      {"0II/"
+       "697p+"
+       "BtLSjxj5989OXI004TogEb94VUnDzOVSgMXie72cuYRvTFNIBgtXlKfkiUjeqVpd4a+"
+       "n5bxNOD1TGrjQtzKU5r7obo=",
+       uint64_t{0xee46254080d6e2db}},
+      {"E84YZW2qipAlMPmctrg7TKlwLZ68l4L+c0xRDUfyyFrA4MAti0q9sHq3TDFviH0Y+"
+       "Kq3tEE5srWFA8LM9oomtmvm5PYxoaarWPLc",
+       uint64_t{0xbbb669588d8bf398}},
+      {"x3pa4HIElyZG0Nj7Vdy9IdJIR4izLmypXw5PCmZB5y68QQ4uRaVVi3UthsoJROvbjDJkP2D"
+       "Q6L/eN8pFeLFzNPKBYzcmuMOb5Ull7w==",
+       uint64_t{0xdc2afaa529beef44}},
+      {"jVDKGYIuWOP/"
+       "QKLdd2wi8B2VJA8Wh0c8PwrXJVM8FOGM3voPDVPyDJOU6QsBDPseoR8uuKd19OZ/"
+       "zAvSCB+zlf6upAsBlheUKgCfKww=",
+       uint64_t{0xf1f67391d45013a8}},
+      {"mkquunhmYe1aR2wmUz4vcvLEcKBoe6H+kjUok9VUn2+eTSkWs4oDDtJvNCWtY5efJwg/"
+       "j4PgjRYWtqnrCkhaqJaEvkkOwVfgMIwF3e+d",
+       uint64_t{0x16fce2b8c65a3429}},
+      {"fRelvKYonTQ+s+rnnvQw+JzGfFoPixtna0vzcSjiDqX5s2Kg2//"
+       "UGrK+AVCyMUhO98WoB1DDbrsOYSw2QzrcPe0+3ck9sePvb+Q/IRaHbw==",
+       uint64_t{0xf4b096699f49fe67}},
+      {"DUwXFJzagljo44QeJ7/"
+       "6ZKw4QXV18lhkYT2jglMr8WB3CHUU4vdsytvw6AKv42ZcG6fRkZkq9fpnmXy6xG0aO3WPT1"
+       "eHuyFirAlkW+zKtwg=",
+       uint64_t{0xca584c4bc8198682}},
+      {"cYmZCrOOBBongNTr7e4nYn52uQUy2mfe48s50JXx2AZ6cRAt/"
+       "xRHJ5QbEoEJOeOHsJyM4nbzwFm++SlT6gFZZHJpkXJ92JkR86uS/eV1hJUR",
+       uint64_t{0xed269fc3818b6aad}},
+      {"EXeHBDfhwzAKFhsMcH9+2RHwV+mJaN01+9oacF6vgm8mCXRd6jeN9U2oAb0of5c5cO4i+"
+       "Vb/LlHZSMI490SnHU0bejhSCC2gsC5d2K30ER3iNA==",
+       uint64_t{0x33f253cbb8fe66a8}},
+      {"FzkzRYoNjkxFhZDso94IHRZaJUP61nFYrh5MwDwv9FNoJ5jyNCY/"
+       "eazPZk+tbmzDyJIGw2h3GxaWZ9bSlsol/vK98SbkMKCQ/wbfrXRLcDzdd/8=",
+       uint64_t{0xd0b76b2c1523d99c}},
+      {"Re4aXISCMlYY/XsX7zkIFR04ta03u4zkL9dVbLXMa/q6hlY/CImVIIYRN3VKP4pnd0AUr/"
+       "ugkyt36JcstAInb4h9rpAGQ7GMVOgBniiMBZ/MGU7H",
+       uint64_t{0xfd28f0811a2a237f}},
+      {"ueLyMcqJXX+MhO4UApylCN9WlTQ+"
+       "ltJmItgG7vFUtqs2qNwBMjmAvr5u0sAKd8jpzV0dDPTwchbIeAW5zbtkA2NABJV6hFM48ib"
+       "4/J3A5mseA3cS8w==",
+       uint64_t{0x6261fb136482e84}},
+      {"6Si7Yi11L+jZMkwaN+GUuzXMrlvEqviEkGOilNq0h8TdQyYKuFXzkYc/"
+       "q74gP3pVCyiwz9KpVGMM9vfnq36riMHRknkmhQutxLZs5fbmOgEO69HglCU=",
+       uint64_t{0x458efc750bca7c3a}},
+      {"Q6AbOofGuTJOegPh9Clm/"
+       "9crtUMQqylKrTc1fhfJo1tqvpXxhU4k08kntL1RG7woRnFrVh2UoMrL1kjin+s9CanT+"
+       "y4hHwLqRranl9FjvxfVKm3yvg68",
+       uint64_t{0xa7e69ff84e5e7c27}},
+      {"ieQEbIPvqY2YfIjHnqfJiO1/MIVRk0RoaG/WWi3kFrfIGiNLCczYoklgaecHMm/"
+       "1sZ96AjO+a5stQfZbJQwS7Sc1ODABEdJKcTsxeW2hbh9A6CFzpowP1A==",
+       uint64_t{0x3c59bfd0c29efe9e}},
+      {"zQUv8hFB3zh2GGl3KTvCmnfzE+"
+       "SUgQPVaSVIELFX5H9cE3FuVFGmymkPQZJLAyzC90Cmi8GqYCvPqTuAAB//"
+       "XTJxy4bCcVArgZG9zJXpjowpNBfr3ngWrSE=",
+       uint64_t{0x10befacc6afd298d}},
+      {"US4hcC1+op5JKGC7eIs8CUgInjKWKlvKQkapulxW262E/"
+       "B2ye79QxOexf188u2mFwwe3WTISJHRZzS61IwljqAWAWoBAqkUnW8SHmIDwHUP31J0p5sGd"
+       "P47L",
+       uint64_t{0x41d5320b0a38efa7}},
+      {"9bHUWFna2LNaGF6fQLlkx1Hkt24nrkLE2CmFdWgTQV3FFbUe747SSqYw6ebpTa07MWSpWRP"
+       "sHesVo2B9tqHbe7eQmqYebPDFnNqrhSdZwFm9arLQVs+7a3Ic6A==",
+       uint64_t{0x58db1c7450fe17f3}},
+      {"Kb3DpHRUPhtyqgs3RuXjzA08jGb59hjKTOeFt1qhoINfYyfTt2buKhD6YVffRCPsgK9SeqZ"
+       "qRPJSyaqsa0ovyq1WnWW8jI/NhvAkZTVHUrX2pC+cD3OPYT05Dag=",
+       uint64_t{0x6098c055a335b7a6}},
+      {"gzxyMJIPlU+bJBwhFUCHSofZ/"
+       "319LxqMoqnt3+L6h2U2+ZXJCSsYpE80xmR0Ta77Jq54o92SMH87HV8dGOaCTuAYF+"
+       "lDL42SY1P316Cl0sZTS2ow3ZqwGbcPNs/1",
+       uint64_t{0x1bbacec67845a801}},
+      {"uR7V0TW+FGVMpsifnaBAQ3IGlr1wx5sKd7TChuqRe6OvUXTlD4hKWy8S+"
+       "8yyOw8lQabism19vOQxfmocEOW/"
+       "vzY0pEa87qHrAZy4s9fH2Bltu8vaOIe+agYohhYORQ==",
+       uint64_t{0xc419cfc7442190}},
+      {"1UR5eoo2aCwhacjZHaCh9bkOsITp6QunUxHQ2SfeHv0imHetzt/"
+       "Z70mhyWZBalv6eAx+YfWKCUib2SHDtz/"
+       "A2dc3hqUWX5VfAV7FQsghPUAtu6IiRatq4YSLpDvKZBQ=",
+       uint64_t{0xc95e510d94ba270c}},
+      {"opubR7H63BH7OtY+Avd7QyQ25UZ8kLBdFDsBTwZlY6gA/"
+       "u+x+"
+       "czC9AaZMgmQrUy15DH7YMGsvdXnviTtI4eVI4aF1H9Rl3NXMKZgwFOsdTfdcZeeHVRzBBKX"
+       "8jUfh1il",
+       uint64_t{0xff1ae05c98089c3f}},
+      {"DC0kXcSXtfQ9FbSRwirIn5tgPri0sbzHSa78aDZVDUKCMaBGyFU6BmrulywYX8yzvwprdLs"
+       "oOwTWN2wMjHlPDqrvVHNEjnmufRDblW+nSS+xtKNs3N5xsxXdv6JXDrAB/Q==",
+       uint64_t{0x90c02b8dceced493}},
+      {"BXRBk+3wEP3Lpm1y75wjoz+PgB0AMzLe8tQ1AYU2/"
+       "oqrQB2YMC6W+9QDbcOfkGbeH+b7IBkt/"
+       "gwCMw2HaQsRFEsurXtcQ3YwRuPz5XNaw5NAvrNa67Fm7eRzdE1+hWLKtA8=",
+       uint64_t{0x9f8a76697ab1aa36}},
+      {"RRBSvEGYnzR9E45Aps/+WSnpCo/X7gJLO4DRnUqFrJCV/kzWlusLE/"
+       "6ZU6RoUf2ROwcgEvUiXTGjLs7ts3t9SXnJHxC1KiOzxHdYLMhVvgNd3hVSAXODpKFSkVXND"
+       "55G2L1W",
+       uint64_t{0x6ba1bf3d811a531d}},
+      {"jeh6Qazxmdi57pa9S3XSnnZFIRrnc6s8QLrah5OX3SB/V2ErSPoEAumavzQPkdKF1/"
+       "SfvmdL+qgF1C+Yawy562QaFqwVGq7+tW0yxP8FStb56ZRgNI4IOmI30s1Ei7iops9Uuw==",
+       uint64_t{0x6a418974109c67b4}},
+      {"6QO5nnDrY2/"
+       "wrUXpltlKy2dSBcmK15fOY092CR7KxAjNfaY+"
+       "aAmtWbbzQk3MjBg03x39afSUN1fkrWACdyQKRaGxgwq6MGNxI6W+8DLWJBHzIXrntrE/"
+       "ml6fnNXEpxplWJ1vEs4=",
+       uint64_t{0x8472f1c2b3d230a3}},
+      {"0oPxeEHhqhcFuwonNfLd5jF3RNATGZS6NPoS0WklnzyokbTqcl4BeBkMn07+fDQv83j/"
+       "BpGUwcWO05f3+DYzocfnizpFjLJemFGsls3gxcBYxcbqWYev51tG3lN9EvRE+X9+Pwww",
+       uint64_t{0x5e06068f884e73a7}},
+      {"naSBSjtOKgAOg8XVbR5cHAW3Y+QL4Pb/JO9/"
+       "oy6L08wvVRZqo0BrssMwhzBP401Um7A4ppAupbQeJFdMrysY34AuSSNvtNUy5VxjNECwiNt"
+       "gwYHw7yakDUv8WvonctmnoSPKENegQg==",
+       uint64_t{0x55290b1a8f170f59}},
+      {"vPyl8DxVeRe1OpilKb9KNwpGkQRtA94UpAHetNh+"
+       "95V7nIW38v7PpzhnTWIml5kw3So1Si0TXtIUPIbsu32BNhoH7QwFvLM+"
+       "JACgSpc5e3RjsL6Qwxxi11npwxRmRUqATDeMUfRAjxg=",
+       uint64_t{0x5501cfd83dfe706a}},
+      {"QC9i2GjdTMuNC1xQJ74ngKfrlA4w3o58FhvNCltdIpuMhHP1YsDA78scQPLbZ3OCUgeQguY"
+       "f/vw6zAaVKSgwtaykqg5ka/4vhz4hYqWU5ficdXqClHl+zkWEY26slCNYOM5nnDlly8Cj",
+       uint64_t{0xe43ed13d13a66990}},
+      {"7CNIgQhAHX27nxI0HeB5oUTnTdgKpRDYDKwRcXfSFGP1XeT9nQF6WKCMjL1tBV6x7KuJ91G"
+       "Zz11F4c+8s+MfqEAEpd4FHzamrMNjGcjCyrVtU6y+7HscMVzr7Q/"
+       "ODLcPEFztFnwjvCjmHw==",
+       uint64_t{0xdf43bc375cf5283f}},
+      {"Qa/hC2RPXhANSospe+gUaPfjdK/yhQvfm4cCV6/pdvCYWPv8p1kMtKOX3h5/"
+       "8oZ31fsmx4Axphu5qXJokuhZKkBUJueuMpxRyXpwSWz2wELx5glxF7CM0Fn+"
+       "OevnkhUn5jsPlG2r5jYlVn8=",
+       uint64_t{0x8112b806d288d7b5}},
+      {"kUw/0z4l3a89jTwN5jpG0SHY5km/"
+       "IVhTjgM5xCiPRLncg40aqWrJ5vcF891AOq5hEpSq0bUCJUMFXgct7kvnys905HjerV7Vs1G"
+       "y84tgVJ70/2+pAZTsB/PzNOE/G6sOj4+GbTzkQu819OLB",
+       uint64_t{0xd52a18abb001cb46}},
+      {"VDdfSDbO8Tdj3T5W0XM3EI7iHh5xpIutiM6dvcJ/fhe23V/srFEkDy5iZf/"
+       "VnA9kfi2C79ENnFnbOReeuZW1b3MUXB9lgC6U4pOTuC+"
+       "jHK3Qnpyiqzj7h3ISJSuo2pob7vY6VHZo6Fn7exEqHg==",
+       uint64_t{0xe12b76a2433a1236}},
+      {"Ldfvy3ORdquM/R2fIkhH/ONi69mcP1AEJ6n/"
+       "oropwecAsLJzQSgezSY8bEiEs0VnFTBBsW+RtZY6tDj03fnb3amNUOq1b7jbqyQkL9hpl+"
+       "2Z2J8IaVSeownWl+bQcsR5/xRktIMckC5AtF4YHfU=",
+       uint64_t{0x175bf7319cf1fa00}},
+      {"BrbNpb42+"
+       "VzZAjJw6QLirXzhweCVRfwlczzZ0VX2xluskwBqyfnGovz5EuX79JJ31VNXa5hTkAyQat3l"
+       "YKRADTdAdwE5PqM1N7YaMqqsqoAAAeuYVXuk5eWCykYmClNdSspegwgCuT+403JigBzi",
+       uint64_t{0xd63d57b3f67525ae}},
+      {"gB3NGHJJvVcuPyF0ZSvHwnWSIfmaI7La24VMPQVoIIWF7Z74NltPZZpx2f+cocESM+"
+       "ILzQW9p+BC8x5IWz7N4Str2WLGKMdgmaBfNkEhSHQDU0IJEOnpUt0HmjhFaBlx0/"
+       "LTmhua+rQ6Wup8ezLwfg==",
+       uint64_t{0x933faea858832b73}},
+      {"hTKHlRxx6Pl4gjG+6ksvvj0CWFicUg3WrPdSJypDpq91LUWRni2KF6+"
+       "81ZoHBFhEBrCdogKqeK+hy9bLDnx7g6rAFUjtn1+cWzQ2YjiOpz4+"
+       "ROBB7lnwjyTGWzJD1rXtlso1g2qVH8XJVigC5M9AIxM=",
+       uint64_t{0x53d061e5f8e7c04f}},
+      {"IWQBelSQnhrr0F3BhUpXUIDauhX6f95Qp+A0diFXiUK7irwPG1oqBiqHyK/SH/"
+       "9S+"
+       "rln9DlFROAmeFdH0OCJi2tFm4afxYzJTFR4HnR4cG4x12JqHaZLQx6iiu6CE3rtWBVz99oA"
+       "wCZUOEXIsLU24o2Y",
+       uint64_t{0xdb4124556dd515e0}},
+      {"TKo+l+"
+       "1dOXdLvIrFqeLaHdm0HZnbcdEgOoLVcGRiCbAMR0j5pIFw8D36tefckAS1RCFOH5IgP8yiF"
+       "T0Gd0a2hI3+"
+       "fTKA7iK96NekxWeoeqzJyctc6QsoiyBlkZerRxs5RplrxoeNg29kKDTM0K94mnhD9g==",
+       uint64_t{0x4fb31a0dd681ee71}},
+      {"YU4e7G6EfQYvxCFoCrrT0EFgVLHFfOWRTJQJ5gxM3G2b+"
+       "1kJf9YPrpsxF6Xr6nYtS8reEEbDoZJYqnlk9lXSkVArm88Cqn6d25VCx3+"
+       "49MqC0trIlXtb7SXUUhwpJK16T0hJUfPH7s5cMZXc6YmmbFuBNPE=",
+       uint64_t{0x27cc72eefa138e4c}},
+      {"/I/"
+       "eImMwPo1U6wekNFD1Jxjk9XQVi1D+"
+       "FPdqcHifYXQuP5aScNQfxMAmaPR2XhuOQhADV5tTVbBKwCDCX4E3jcDNHzCiPvViZF1W27t"
+       "xaf2BbFQdwKrNCmrtzcluBFYu0XZfc7RU1RmxK/RtnF1qHsq/O4pp",
+       uint64_t{0x44bc2dfba4bd3ced}},
+      {"CJTT9WGcY2XykTdo8KodRIA29qsqY0iHzWZRjKHb9alwyJ7RZAE3V5Juv4MY3MeYEr1EPCC"
+       "MxO7yFXqT8XA8YTjaMp3bafRt17Pw8JC4iKJ1zN+WWKOESrj+"
+       "3aluGQqn8z1EzqY4PH7rLG575PYeWsP98BugdA==",
+       uint64_t{0x242da1e3a439bed8}},
+      {"ZlhyQwLhXQyIUEnMH/"
+       "AEW27vh9xrbNKJxpWGtrEmKhd+nFqAfbeNBQjW0SfG1YI0xQkQMHXjuTt4P/"
+       "EpZRtA47ibZDVS8TtaxwyBjuIDwqcN09eCtpC+Ls+"
+       "vWDTLmBeDM3u4hmzz4DQAYsLiZYSJcldg9Q3wszw=",
+       uint64_t{0xdc559c746e35c139}},
+      {"v2KU8y0sCrBghmnm8lzGJlwo6D6ObccAxCf10heoDtYLosk4ztTpLlpSFEyu23MLA1tJkcg"
+       "Rko04h19QMG0mOw/"
+       "wc93EXAweriBqXfvdaP85sZABwiKO+6rtS9pacRVpYYhHJeVTQ5NzrvBvi1huxAr+"
+       "xswhVMfL",
+       uint64_t{0xd0b0350275b9989}},
+      {"QhKlnIS6BuVCTQsnoE67E/"
+       "yrgogE8EwO7xLaEGei26m0gEU4OksefJgppDh3X0x0Cs78Dr9IHK5b977CmZlrTRmwhlP8p"
+       "M+UzXPNRNIZuN3ntOum/QhUWP8SGpirheXENWsXMQ/"
+       "nxtxakyEtrNkKk471Oov9juP8oQ==",
+       uint64_t{0xb04489e41d17730c}},
+      {"/ZRMgnoRt+Uo6fUPr9FqQvKX7syhgVqWu+"
+       "WUSsiQ68UlN0efSP6Eced5gJZL6tg9gcYJIkhjuQNITU0Q3TjVAnAcobgbJikCn6qZ6pRxK"
+       "BY4MTiAlfGD3T7R7hwJwx554MAy++Zb/YUFlnCaCJiwQMnowF7aQzwYFCo=",
+       uint64_t{0x2217285eb4572156}},
+      {"NB7tU5fNE8nI+SXGfipc7sRkhnSkUF1krjeo6k+8FITaAtdyz+"
+       "o7mONgXmGLulBPH9bEwyYhKNVY0L+njNQrZ9YC2aXsFD3PdZsxAFaBT3VXEzh+"
+       "NGBTjDASNL3mXyS8Yv1iThGfHoY7T4aR0NYGJ+k+pR6f+KrPC96M",
+       uint64_t{0x12c2e8e68aede73b}},
+      {"8T6wrqCtEO6/rwxF6lvMeyuigVOLwPipX/FULvwyu+1wa5sQGav/"
+       "2FsLHUVn6cGSi0LlFwLewGHPFJDLR0u4t7ZUyM//"
+       "x6da0sWgOa5hzDqjsVGmjxEHXiaXKW3i4iSZNuxoNbMQkIbVML+"
+       "DkYu9ND0O2swg4itGeVSzXA==",
+       uint64_t{0x4d612125bdc4fd00}},
+      {"Ntf1bMRdondtMv1CYr3G80iDJ4WSAlKy5H34XdGruQiCrnRGDBa+"
+       "eUi7vKp4gp3BBcVGl8eYSasVQQjn7MLvb3BjtXx6c/"
+       "bCL7JtpzQKaDnPr9GWRxpBXVxKREgMM7d8lm35EODv0w+"
+       "hQLfVSh8OGs7fsBb68nNWPLeeSOo=",
+       uint64_t{0x81826b553954464e}},
+      {"VsSAw72Ro6xks02kaiLuiTEIWBC5bgqr4WDnmP8vglXzAhixk7td926rm9jNimL+"
+       "kroPSygZ9gl63aF5DCPOACXmsbmhDrAQuUzoh9ZKhWgElLQsrqo1KIjWoZT5b5QfVUXY9lS"
+       "IBg3U75SqORoTPq7HalxxoIT5diWOcJQi",
+       uint64_t{0xc2e5d345dc0ddd2d}},
+      {"j+loZ+C87+"
+       "bJxNVebg94gU0mSLeDulcHs84tQT7BZM2rzDSLiCNxUedHr1ZWJ9ejTiBa0dqy2I2ABc++"
+       "xzOLcv+//YfibtjKtYggC6/3rv0XCc7xu6d/"
+       "O6xO+XOBhOWAQ+IHJVHf7wZnDxIXB8AUHsnjEISKj7823biqXjyP3g==",
+       uint64_t{0x3da6830a9e32631e}},
+      {"f3LlpcPElMkspNtDq5xXyWU62erEaKn7RWKlo540gR6mZsNpK1czV/"
+       "sOmqaq8XAQLEn68LKj6/"
+       "cFkJukxRzCa4OF1a7cCAXYFp9+wZDu0bw4y63qbpjhdCl8GO6Z2lkcXy7KOzbPE01ukg7+"
+       "gN+7uKpoohgAhIwpAKQXmX5xtd0=",
+       uint64_t{0xc9ae5c8759b4877a}},
+  };
+
+#if defined(ABSL_IS_BIG_ENDIAN)
+  constexpr uint64_t kGolden[kNumGoldenOutputs] = {
+      0xe5a40d39ab796423, 0x1766974bf7527d81, 0x5c3bbbe230db17a8,
+      0xa6630143a7e6aa6f, 0x17645cb7318b86b,  0x218b175f30ba61f8,
+      0xa6564b468248c683, 0xef192f401b116e1c, 0xbe8dc0c54617639d,
+      0xe7b01610fc22dbb8, 0x99d9f694404af913, 0xf4eecd37464b45c5,
+      0x7d2c653d63596d9b, 0x3f15c8544ec5393a, 0x6b9dc0c1704f796c,
+      0xf1ded7a7eae5ed5a, 0x2db2fd7c6dd4641b, 0x151ca2d3d4cd33ab,
+      0xa5af5994ac2ccd64, 0x2b2a4ca3191d2fce, 0xf89e68c9364e7c05,
+      0x71724c70b799c21,  0x70536fabfd157369, 0xdee92794c3c3082b,
+      0xac033a6743d3b3eb, 0xed2956b506cd5151, 0xbd669644755264b6,
+      0x6ab1ff5d5f549a63, 0xf6bd551a2e3e04e,  0x7b5a8cef6875ea73,
+      0x22bccf4d4db0a91c, 0x4f2bc07754c7c7eb, 0xfb6b8342a86725db,
+      0x13a1a0d4c5854da,  0x5f6e44655f7dedac, 0x54a9198dff2bdf85,
+      0xdb17e6915d4e4042, 0xa69926cf5c3b89f,  0xf77f031bfd74c096,
+      0x1d6f916fdd50ec3c, 0x334ac76013ade393, 0x99370f899111de15,
+      0x352457a03ada6de,  0x341974d4f42d854d, 0xda89ab02872aeb5,
+      0x6ec2b74e143b10d9, 0x6f284c0b5cd60522, 0xf9670de353438f88,
+      0xde920913adf0a2b4, 0xb7a07d7c0c17a8ec, 0x879a69f558ba3a98,
+      0x360cf6d802df20f9, 0x53530f8046673738, 0xbd8f5f2bcf35e483,
+      0x3f171f047144b983, 0x644d04e820823465, 0x50e44773a20b2702,
+      0xe584ed4c05c745dd, 0x9a825c85b95ab6c0, 0xbce2931deb74e775,
+      0x10468e9e705c7cfe, 0x12e01de3104141e2, 0x5c11ae2ee3713abd,
+      0x6ac5ffb0860319e6, 0xc1e6da1849d30fc9, 0xa0e4d247a458b447,
+      0x4530d4615c32b89b, 0x116aa09107a76505, 0xf941339d00d9bb73,
+      0x573a0fc1615afb33, 0xa975c81dc868b258, 0x3ab2c5250ab54bda,
+      0x37f99f208a3e3b11, 0x4b49b0ff706689d,  0x30bafa0b8f0a87fe,
+      0xea6787a65cc20cdd, 0x55861729f1fc3ab8, 0xea38e009c5be9b72,
+      0xcb8522cba33c3c66, 0x352e77653fe306f3, 0xe0bb760793bac064,
+      0xf66ec59322662956, 0x637aa320455d56f8, 0x46ee546be5824a89,
+      0x9e6842421e83d8a4, 0xf98ac2bc96b9fb8c, 0xf2c1002fd9a70b99,
+      0x4c2b62b1e39e9405, 0x3248555fa3ade9c4, 0xd4d04c37f6417c21,
+      0xf40cd506b1bf5653, 0x6c45d6005c760d2f, 0x61d88a7e61ff0d7e,
+      0x131591e8a53cc967, 0xdae85cb9bc29bab6, 0xe98835334905e626,
+      0x7cce50a2b66b8754, 0x5b0b3d0c5ac498ae, 0xd35a218c974d1756,
+      0xfce436ddc1d003c,  0xd183901de90bb741, 0x9378f8f34974a66,
+      0x21f11ae0a0402368, 0xf2fbd7c94ef89cb6, 0xc329c69d0f0d080b,
+      0xf2841cba16216a61, 0x47aba97b44916df1, 0x724d4e00a8019fcf,
+      0x2df9005c2a728d63, 0xc788892a1a5d7515, 0x9e993a65f9df0480,
+      0x76876721ff49f969, 0xbe7a796cfba15bf5, 0xa4c8bd54586f5488,
+      0xb390a325275501ab, 0x893f11317427ccf1, 0x92f2bb57da5695b9,
+      0x30985b90da88269f, 0x2c690e268e086de8, 0x1c02df6097997196,
+      0x1f9778f8bbdf6455, 0x7d57378c7bf8416d, 0xba8582a5f8d84d38,
+      0xe8ca43b85050be4e, 0x5048cf6bed8a5d9f, 0xfbc5ba80917d0ea4,
+      0x8011026525bf1691, 0x26b8dc6aed9fb50d, 0x191f5bfee77c1fe3,
+      0xdd497891465a2cc1, 0x6f1fe8c57a33072e, 0x2c9f4ec078c460c0,
+      0x9a725bde8f6a1437, 0x6ce545fa3ef61e4d,
+  };
+#else
+  constexpr uint64_t kGolden[kNumGoldenOutputs] = {
+      0xe5a40d39ab796423, 0x1766974bf7527d81, 0x5c3bbbe230db17a8,
+      0xa6630143a7e6aa6f, 0x8787cb2d04b0c984, 0x33603654ff574ac2,
+      0xa6564b468248c683, 0xef192f401b116e1c, 0xbe8dc0c54617639d,
+      0x93d7f665b5521c8e, 0x646d70bb42445f28, 0x96a7b1e3cc9bd426,
+      0x76020289ab0790c4, 0x39f842e4133b9b44, 0x2b8d7047be4bcaab,
+      0x99628abef6716a97, 0x4432e02ba42b2740, 0x74d810efcad7918a,
+      0x88c84e986002507f, 0x4f99acf193cf39b9, 0xd90e7a3655891e37,
+      0x3bb378b1d4df8fcf, 0xf78e94045c052d47, 0x26da0b2130da6b40,
+      0x30b4d426af8c6986, 0x5413b4aaf3baaeae, 0x756ab265370a1597,
+      0xdaf5f4b7d09814fb, 0x8f874ae37742b75e, 0x8fecd03956121ce8,
+      0x229c292ea7a08285, 0x0bb4bf0692d14bae, 0x207b24ca3bdac1db,
+      0x64f6cd6745d3825b, 0xa2b2e1656b58df1e, 0x0d01d30d9ee7a148,
+      0x1cb4cd00ab804e3b, 0x4697f2637fd90999, 0x8383a756b5688c07,
+      0x695c29cb3696a975, 0xda2e5a5a5e971521, 0x7935d4befa056b2b,
+      0x38dd541ca95420fe, 0xcc06c7a4963f967f, 0xbf0f6f66e232fb20,
+      0xf7efb32d373fe71a, 0xe2e64634b1c12660, 0x285b8fd1638e306d,
+      0x658e8a4e3b714d6c, 0xf391fb968e0eb398, 0x744a9ea0cc144bf2,
+      0x12636f2be11012f1, 0x29c57de825948f80, 0x58c6f99ab0d1c021,
+      0x13e7b5a7b82fe3bb, 0x10fbc87901e02b63, 0xa24c9184901b748b,
+      0xcac4fd4c5080e581, 0xc38bdb7483ba68e1, 0xdb2a8069b2ceaffa,
+      0xdf9fe91d0d1c7887, 0xe83f49e96e2e6a08, 0x0c69e61b62ca2b62,
+      0xb4a4f3f85f8298fe, 0x167a1b39e1e95f41, 0xf8a2a5649855ee41,
+      0x27992565b595c498, 0x3e08cca5b71f9346, 0xad406b10c770a6d2,
+      0xd1713ce6e552bcf2, 0x753b287194c73ad3, 0x5ae41a95f600af1c,
+      0x4a61163b86a8bb4c, 0x42eeaa79e760c7e4, 0x698df622ef465b0a,
+      0x157583111e1a6026, 0xaa1388f078e793e0, 0xf10d68d0f3309360,
+      0x2af056184457a3de, 0x6d0058e1590b2489, 0x638f287f68817f12,
+      0xc46b71fecefd5467, 0x2c8e94679d964e0a, 0x8612b797ce22503a,
+      0x59f929babfba7170, 0x9527556923fb49a0, 0x1039ab644f5e150b,
+      0x7816c83f3aa05e6d, 0xf51d2f564518c619, 0x67d494cff03ac004,
+      0x2802d636ced1cfbb, 0xf64e20bad771cb12, 0x0b9a6cf84a83e15e,
+      0x8da6630319609301, 0x40946a86e2a996f3, 0xcab7f5997953fa76,
+      0x39129ca0e04fc465, 0x5238221fd685e1b8, 0x175130c407dbcaab,
+      0x02f20e7536c0b0df, 0x2742cb488a04ad56, 0xd6afb593879ff93b,
+      0xf50ad64caac0ca7f, 0x2ade95c4261364ae, 0x5c4f3299faacd07a,
+      0xfffe3bff0ae5e9bc, 0x1db785c0005166e4, 0xea000d962ad18418,
+      0xe42aef38359362d9, 0xc8e95657348a3891, 0xc162eca864f238c6,
+      0xbe1fb373e20579ad, 0x628a1d4f40aa6ffd, 0xa87bdb7456340f90,
+      0x5960ef3ba982c801, 0x5026586df9a431ec, 0xfe4b8a20fdf0840b,
+      0xdcb761867da7072f, 0xc10d4653667275b7, 0x727720deec13110b,
+      0x710b009662858dc9, 0xfbf8f7a3ecac1eb7, 0xb6fc4fcd0722e3df,
+      0x7cb86dcc55104aac, 0x19e71e9b45c3a51e, 0x51de38573c2bea48,
+      0xa73ab6996d6df158, 0x55ef2b8c930817b2, 0xb2850bf5fae87157,
+      0xecf3de1acd04651f, 0xcc0a40552559ff32, 0xc385c374f20315b1,
+      0xb90208a4c7234183, 0x58aa1ca7a4c075d9,
+  };
+#endif
+
+#if UPDATE_GOLDEN
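+  // Golden-update mode: recompute each hash over the Base64-decoded test
+  // inputs, print the values for pasting into kGolden above, and then fail
+  // the test deliberately so the regenerated output is not missed.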
+  (void)kGolden;  // Silence warning.
+  for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
+    std::string str;
+    ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str));
+    uint64_t h = absl::hash_internal::LowLevelHash(str.data(), str.size(),
+                                                   cases[i].seed, kSalt);
+    printf("0x%016" PRIx64 ", ", h);
+    if (i % 3 == 2) {
+      printf("\n");
+    }
+  }
+  printf("\n\n\n");
+  EXPECT_FALSE(true);
+#else
+  for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
+    SCOPED_TRACE(::testing::Message()
+                 << "i = " << i << "; input = " << cases[i].base64_data);
+    std::string str;
+    ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str));
+    EXPECT_EQ(absl::hash_internal::LowLevelHash(str.data(), str.size(),
+                                                cases[i].seed, kSalt),
+              kGolden[i]);
+  }
+#endif
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/hash/internal/spy_hash_state.h b/abseil-cpp/absl/hash/internal/spy_hash_state.h
index c083120..0972826 100644
--- a/abseil-cpp/absl/hash/internal/spy_hash_state.h
+++ b/abseil-cpp/absl/hash/internal/spy_hash_state.h
@@ -15,6 +15,7 @@
 #ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
 #define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
 
+#include <algorithm>
 #include <ostream>
 #include <string>
 #include <vector>
@@ -167,6 +168,24 @@
 
   using SpyHashStateImpl::HashStateBase::combine_contiguous;
 
+  template <typename CombinerT>
+  static SpyHashStateImpl RunCombineUnordered(SpyHashStateImpl state,
+                                              CombinerT combiner) {
+    UnorderedCombinerCallback cb;
+
+    combiner(SpyHashStateImpl<void>{}, std::ref(cb));
+
+    std::sort(cb.element_hash_representations.begin(),
+              cb.element_hash_representations.end());
+    state.hash_representation_.insert(state.hash_representation_.end(),
+                                      cb.element_hash_representations.begin(),
+                                      cb.element_hash_representations.end());
+    if (cb.error && cb.error->has_value()) {
+      state.error_ = std::move(cb.error);
+    }
+    return state;
+  }
+
   absl::optional<std::string> error() const {
     if (moved_from_) {
       return "Returned a moved-from instance of the hash state object.";
@@ -178,6 +197,22 @@
   template <typename U>
   friend class SpyHashStateImpl;
 
+  struct UnorderedCombinerCallback {
+    std::vector<std::string> element_hash_representations;
+    std::shared_ptr<absl::optional<std::string>> error;
+
+    // The inner spy can have a different type.
+    template <typename U>
+    void operator()(SpyHashStateImpl<U>& inner) {
+      element_hash_representations.push_back(
+          absl::StrJoin(inner.hash_representation_, ""));
+      if (inner.error_->has_value()) {
+        error = std::move(inner.error_);
+      }
+      inner = SpyHashStateImpl<void>{};
+    }
+  };
+
   // This is true if SpyHashStateImpl<T> has been passed to a call of
   // AbslHashValue with the wrong type. This detects that the user called
   // AbslHashValue directly (because the hash state type does not match).
diff --git a/abseil-cpp/absl/log/BUILD.bazel b/abseil-cpp/absl/log/BUILD.bazel
new file mode 100644
index 0000000..e141063
--- /dev/null
+++ b/abseil-cpp/absl/log/BUILD.bazel
@@ -0,0 +1,597 @@
+#
+# Copyright 2022 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# Public targets
+
+cc_library(
+    name = "absl_check",
+    hdrs = ["absl_check.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/log/internal:check_impl",
+    ],
+)
+
+cc_library(
+    name = "absl_log",
+    hdrs = ["absl_log.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/log/internal:log_impl",
+    ],
+)
+
+cc_library(
+    name = "check",
+    hdrs = ["check.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/log/internal:check_impl",
+        "//absl/log/internal:check_op",
+        "//absl/log/internal:conditions",
+        "//absl/log/internal:log_message",
+        "//absl/log/internal:strip",
+    ],
+)
+
+cc_library(
+    name = "die_if_null",
+    srcs = ["die_if_null.cc"],
+    hdrs = ["die_if_null.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/strings",
+    ],
+)
+
+cc_library(
+    name = "flags",
+    srcs = ["flags.cc"],
+    hdrs = ["flags.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:public"],
+    deps = [
+        ":globals",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/flags:flag",
+        "//absl/flags:marshalling",
+        "//absl/log/internal:config",
+        "//absl/log/internal:flags",
+        "//absl/strings",
+    ],
+    # Binaries which do not access these flags from C++ still want this library linked in.
+    alwayslink = True,
+)
+
+cc_library(
+    name = "globals",
+    srcs = ["globals.cc"],
+    hdrs = ["globals.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:public"],
+    deps = [
+        "//absl/base:atomic_hook",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/base:raw_logging_internal",
+        "//absl/hash",
+        "//absl/strings",
+    ],
+)
+
+cc_library(
+    name = "initialize",
+    srcs = ["initialize.cc"],
+    hdrs = ["initialize.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:public"],
+    deps = [
+        ":globals",
+        "//absl/base:config",
+        "//absl/log/internal:globals",
+        "//absl/time",
+    ],
+)
+
+cc_library(
+    name = "log",
+    hdrs = ["log.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/log/internal:log_impl",
+    ],
+)
+
+cc_library(
+    name = "log_entry",
+    srcs = ["log_entry.cc"],
+    hdrs = ["log_entry.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log/internal:config",
+        "//absl/strings",
+        "//absl/time",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "log_sink",
+    srcs = ["log_sink.cc"],
+    hdrs = ["log_sink.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_entry",
+        "//absl/base:config",
+    ],
+)
+
+cc_library(
+    name = "log_sink_registry",
+    hdrs = ["log_sink_registry.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_sink",
+        "//absl/base:config",
+        "//absl/log/internal:log_sink_set",
+    ],
+)
+
+cc_library(
+    name = "log_streamer",
+    hdrs = ["log_streamer.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":absl_log",
+        "//absl/base:config",
+        "//absl/base:log_severity",
+        "//absl/strings",
+        "//absl/strings:internal",
+        "//absl/types:optional",
+        "//absl/utility",
+    ],
+)
+
+cc_library(
+    name = "scoped_mock_log",
+    testonly = True,
+    srcs = ["scoped_mock_log.cc"],
+    hdrs = ["scoped_mock_log.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_entry",
+        ":log_sink",
+        ":log_sink_registry",
+        "//absl/base:config",
+        "//absl/base:log_severity",
+        "//absl/base:raw_logging_internal",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_library(
+    name = "structured",
+    hdrs = ["structured.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/log/internal:structured",
+        "//absl/strings",
+    ],
+)
+
+# Test targets
+
+cc_test(
+    name = "absl_check_test",
+    size = "small",
+    srcs = ["absl_check_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:ios",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":absl_check",
+        ":check_test_impl",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "absl_log_basic_test",
+    size = "small",
+    srcs = ["absl_log_basic_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":absl_log",
+        ":log_basic_test_impl",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "check_test",
+    size = "small",
+    srcs = ["check_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:ios",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":check",
+        ":check_test_impl",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "check_test_impl",
+    testonly = True,
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:ios",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
+    textual_hdrs = ["check_test_impl.inc"],
+    visibility = ["//visibility:private"],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/log/internal:test_helpers",
+        "//absl/status",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "die_if_null_test",
+    size = "small",
+    srcs = ["die_if_null_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":die_if_null",
+        "//absl/base:core_headers",
+        "//absl/log/internal:test_helpers",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "flags_test",
+    size = "small",
+    srcs = ["flags_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":flags",
+        ":globals",
+        ":log",
+        ":scoped_mock_log",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/flags:flag",
+        "//absl/flags:reflection",
+        "//absl/log/internal:flags",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "globals_test",
+    size = "small",
+    srcs = ["globals_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":globals",
+        ":log",
+        ":scoped_mock_log",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log/internal:globals",
+        "//absl/log/internal:test_helpers",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_basic_test",
+    size = "small",
+    srcs = ["log_basic_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log",
+        ":log_basic_test_impl",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "log_basic_test_impl",
+    testonly = True,
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    textual_hdrs = ["log_basic_test_impl.inc"],
+    visibility = ["//visibility:private"],
+    deps = [
+        "//absl/base",
+        "//absl/base:log_severity",
+        "//absl/log:globals",
+        "//absl/log:log_entry",
+        "//absl/log:scoped_mock_log",
+        "//absl/log/internal:test_actions",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "log_entry_test",
+    size = "small",
+    srcs = ["log_entry_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_entry",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log/internal:append_truncated",
+        "//absl/log/internal:format",
+        "//absl/log/internal:test_helpers",
+        "//absl/strings",
+        "//absl/time",
+        "//absl/types:span",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_format_test",
+    size = "small",
+    srcs = ["log_format_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":check",
+        ":log",
+        ":scoped_mock_log",
+        "//absl/log/internal:test_matchers",
+        "//absl/strings",
+        "//absl/strings:str_format",
+        "//absl/types:optional",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_macro_hygiene_test",
+    size = "small",
+    srcs = ["log_macro_hygiene_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log",
+        ":scoped_mock_log",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_sink_test",
+    size = "medium",
+    srcs = ["log_sink_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:ios",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":log",
+        ":log_sink",
+        ":log_sink_registry",
+        ":scoped_mock_log",
+        "//absl/base:core_headers",
+        "//absl/log/internal:test_actions",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_streamer_test",
+    size = "medium",
+    srcs = ["log_streamer_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log",
+        ":log_streamer",
+        ":scoped_mock_log",
+        "//absl/base",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log/internal:test_actions",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_modifier_methods_test",
+    size = "small",
+    srcs = ["log_modifier_methods_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log",
+        ":log_sink",
+        ":scoped_mock_log",
+        "//absl/log/internal:test_actions",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "//absl/strings",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "scoped_mock_log_test",
+    size = "small",
+    srcs = ["scoped_mock_log_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    linkstatic = 1,
+    tags = [
+        "no_test:os:ios",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":globals",
+        ":log",
+        ":scoped_mock_log",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "//absl/memory",
+        "//absl/strings",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "stripping_test",
+    size = "small",
+    srcs = ["stripping_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    # This test requires all code live in the binary (instead of shared libraries)
+    # because we test for the existence of specific literals in the binary.
+    linkstatic = 1,
+    deps = [
+        ":check",
+        ":log",
+        "//absl/base:log_severity",
+        "//absl/base:strerror",
+        "//absl/flags:program_name",
+        "//absl/log/internal:test_helpers",
+        "//absl/strings",
+        "//absl/strings:str_format",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "structured_test",
+    size = "small",
+    srcs = ["structured_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log",
+        ":scoped_mock_log",
+        ":structured",
+        "//absl/base:core_headers",
+        "//absl/log/internal:test_helpers",
+        "//absl/log/internal:test_matchers",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "log_benchmark",
+    testonly = 1,
+    srcs = ["log_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    deps = [
+        ":check",
+        ":flags",
+        ":globals",
+        ":log",
+        ":log_entry",
+        ":log_sink",
+        ":log_sink_registry",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/flags:flag",
+        "//absl/log/internal:flags",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
diff --git a/abseil-cpp/absl/log/CMakeLists.txt b/abseil-cpp/absl/log/CMakeLists.txt
new file mode 100644
index 0000000..9320ce5
--- /dev/null
+++ b/abseil-cpp/absl/log/CMakeLists.txt
@@ -0,0 +1,1042 @@
+# Copyright 2022 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Internal targets
+absl_cc_library(
+  NAME
+    log_internal_check_impl
+  SRCS
+  HDRS
+    "internal/check_impl.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log_internal_check_op
+    absl::log_internal_conditions
+    absl::log_internal_message
+    absl::log_internal_strip
+)
+
+absl_cc_library(
+  NAME
+    log_internal_check_op
+  SRCS
+    "internal/check_op.cc"
+  HDRS
+    "internal/check_op.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_internal_nullguard
+    absl::log_internal_nullstream
+    absl::log_internal_strip
+    absl::strings
+)
+
+absl_cc_library(
+  NAME
+    log_internal_conditions
+  SRCS
+    "internal/conditions.cc"
+  HDRS
+    "internal/conditions.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::log_internal_voidify
+)
+
+absl_cc_library(
+  NAME
+    log_internal_config
+  SRCS
+  HDRS
+    "internal/config.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+)
+
+absl_cc_library(
+  NAME
+    log_internal_flags
+  SRCS
+  HDRS
+    "internal/flags.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::flags
+)
+
+absl_cc_library(
+  NAME
+    log_internal_format
+  SRCS
+    "internal/log_format.cc"
+  HDRS
+    "internal/log_format.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_internal_append_truncated
+    absl::log_internal_config
+    absl::log_internal_globals
+    absl::log_severity
+    absl::strings
+    absl::str_format
+    absl::time
+    absl::span
+)
+
+absl_cc_library(
+  NAME
+    log_internal_globals
+  SRCS
+    "internal/globals.cc"
+  HDRS
+    "internal/globals.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_severity
+    absl::raw_logging_internal
+    absl::strings
+    absl::time
+)
+
+absl_cc_library(
+  NAME
+    log_internal_log_impl
+  SRCS
+  HDRS
+    "internal/log_impl.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log_internal_conditions
+    absl::log_internal_message
+    absl::log_internal_strip
+)
+
+absl_cc_library(
+  NAME
+    log_internal_proto
+  SRCS
+    "internal/proto.cc"
+  HDRS
+    "internal/proto.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::strings
+    absl::span
+)
+
+absl_cc_library(
+  NAME
+    log_internal_message
+  SRCS
+    "internal/log_message.cc"
+  HDRS
+    "internal/log_message.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::errno_saver
+    absl::inlined_vector
+    absl::examine_stack
+    absl::log_internal_append_truncated
+    absl::log_internal_format
+    absl::log_internal_globals
+    absl::log_internal_proto
+    absl::log_internal_log_sink_set
+    absl::log_internal_nullguard
+    absl::log_globals
+    absl::log_entry
+    absl::log_severity
+    absl::log_sink
+    absl::log_sink_registry
+    absl::memory
+    absl::raw_logging_internal
+    absl::strings
+    absl::strerror
+    absl::time
+    absl::span
+)
+
+absl_cc_library(
+  NAME
+    log_internal_log_sink_set
+  SRCS
+    "internal/log_sink_set.cc"
+  HDRS
+    "internal/log_sink_set.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+    $<$<BOOL:${ANDROID}>:-llog>
+  DEPS
+    absl::base
+    absl::cleanup
+    absl::config
+    absl::core_headers
+    absl::log_internal_config
+    absl::log_internal_globals
+    absl::log_globals
+    absl::log_entry
+    absl::log_severity
+    absl::log_sink
+    absl::raw_logging_internal
+    absl::synchronization
+    absl::span
+    absl::strings
+)
+
+absl_cc_library(
+  NAME
+    log_internal_nullguard
+  SRCS
+    "internal/nullguard.cc"
+  HDRS
+    "internal/nullguard.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+)
+
+absl_cc_library(
+  NAME
+    log_internal_nullstream
+  SRCS
+  HDRS
+    "internal/nullstream.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_severity
+    absl::strings
+)
+
+absl_cc_library(
+  NAME
+    log_internal_strip
+  SRCS
+  HDRS
+    "internal/strip.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log_internal_message
+    absl::log_internal_nullstream
+    absl::log_severity
+)
+
+absl_cc_library(
+  NAME
+    log_internal_test_actions
+  SRCS
+    "internal/test_actions.cc"
+  HDRS
+    "internal/test_actions.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_entry
+    absl::log_severity
+    absl::strings
+    absl::time
+  TESTONLY
+)
+
+absl_cc_library(
+  NAME
+    log_internal_test_helpers
+  SRCS
+    "internal/test_helpers.cc"
+  HDRS
+    "internal/test_helpers.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_globals
+    absl::log_initialize
+    absl::log_internal_globals
+    absl::log_severity
+    GTest::gtest
+  TESTONLY
+)
+
+absl_cc_library(
+  NAME
+    log_internal_test_matchers
+  SRCS
+    "internal/test_matchers.cc"
+  HDRS
+    "internal/test_matchers.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_entry
+    absl::log_internal_test_helpers
+    absl::log_severity
+    absl::strings
+    absl::time
+    GTest::gtest
+    GTest::gmock
+  TESTONLY
+)
+
+absl_cc_library(
+  NAME
+    log_internal_voidify
+  SRCS
+  HDRS
+    "internal/voidify.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+)
+
+absl_cc_library(
+  NAME
+    log_internal_append_truncated
+  SRCS
+  HDRS
+    "internal/append_truncated.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::strings
+    absl::span
+)
+
+# Public targets
+absl_cc_library(
+  NAME
+    absl_check
+  SRCS
+  HDRS
+    "absl_check.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log_internal_check_impl
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    absl_log
+  SRCS
+  HDRS
+    "absl_log.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log_internal_log_impl
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    check
+  SRCS
+  HDRS
+    "check.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log_internal_check_impl
+    absl::core_headers
+    absl::log_internal_check_op
+    absl::log_internal_conditions
+    absl::log_internal_message
+    absl::log_internal_strip
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    die_if_null
+  SRCS
+    "die_if_null.cc"
+  HDRS
+    "die_if_null.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log
+    absl::strings
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log_flags
+  SRCS
+    "flags.cc"
+  HDRS
+    "flags.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_globals
+    absl::log_severity
+    absl::log_internal_config
+    absl::log_internal_flags
+    absl::flags
+    absl::flags_marshalling
+    absl::strings
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log_globals
+  SRCS
+    "globals.cc"
+  HDRS
+    "globals.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::atomic_hook
+    absl::config
+    absl::core_headers
+    absl::hash
+    absl::log_severity
+    absl::raw_logging_internal
+    absl::strings
+)
+
+absl_cc_library(
+  NAME
+    log_initialize
+  SRCS
+    "initialize.cc"
+  HDRS
+    "initialize.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_globals
+    absl::log_internal_globals
+    absl::time
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log
+  SRCS
+  HDRS
+    "log.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log_internal_log_impl
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log_entry
+  SRCS
+    "log_entry.cc"
+  HDRS
+    "log_entry.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_internal_config
+    absl::log_severity
+    absl::span
+    absl::strings
+    absl::time
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log_sink
+  SRCS
+    "log_sink.cc"
+  HDRS
+    "log_sink.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_entry
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log_sink_registry
+  SRCS
+  HDRS
+    "log_sink_registry.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_sink
+    absl::log_internal_log_sink_set
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    log_streamer
+  SRCS
+  HDRS
+    "log_streamer.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::absl_log
+    absl::log_severity
+    absl::optional
+    absl::strings
+    absl::strings_internal
+    absl::utility
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    scoped_mock_log
+  SRCS
+    "scoped_mock_log.cc"
+  HDRS
+    "scoped_mock_log.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_entry
+    absl::log_severity
+    absl::log_sink
+    absl::log_sink_registry
+    absl::raw_logging_internal
+    absl::strings
+    GTest::gmock
+    GTest::gtest
+  PUBLIC
+  TESTONLY
+)
+
+absl_cc_library(
+  NAME
+    log_internal_structured
+  HDRS
+    "internal/structured.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_internal_message
+    absl::strings
+)
+
+absl_cc_library(
+  NAME
+    log_structured
+  HDRS
+    "structured.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::log_internal_structured
+    absl::strings
+  PUBLIC
+)
+
+# Test targets
+
+absl_cc_test(
+  NAME
+    absl_check_test
+  SRCS
+    "absl_check_test.cc"
+    "check_test_impl.inc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::absl_check
+    absl::config
+    absl::core_headers
+    absl::log_internal_test_helpers
+    absl::status
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    absl_log_basic_test
+  SRCS
+    "log_basic_test.cc"
+    "log_basic_test_impl.inc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::absl_log
+    absl::log_entry
+    absl::log_globals
+    absl::log_severity
+    absl::log_internal_test_actions
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::scoped_mock_log
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    check_test
+  SRCS
+    "check_test.cc"
+    "check_test_impl.inc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::check
+    absl::config
+    absl::core_headers
+    absl::log_internal_test_helpers
+    absl::status
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    die_if_null_test
+  SRCS
+    "die_if_null_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::die_if_null
+    absl::log_internal_test_helpers
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_basic_test
+  SRCS
+    "log_basic_test.cc"
+    "log_basic_test_impl.inc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::log
+    absl::log_entry
+    absl::log_globals
+    absl::log_severity
+    absl::log_internal_test_actions
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::scoped_mock_log
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_entry_test
+  SRCS
+    "log_entry_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::log_entry
+    absl::log_internal_append_truncated
+    absl::log_internal_format
+    absl::log_internal_globals
+    absl::log_internal_test_helpers
+    absl::log_severity
+    absl::span
+    absl::strings
+    absl::time
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_flags_test
+  SRCS
+    "flags_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_flags
+    absl::log_globals
+    absl::log_internal_flags
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::log_severity
+    absl::flags
+    absl::flags_reflection
+    absl::scoped_mock_log
+    absl::strings
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_globals_test
+  SRCS
+    "globals_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_globals
+    absl::log_internal_globals
+    absl::log_internal_test_helpers
+    absl::log_severity
+    absl::scoped_mock_log
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_format_test
+  SRCS
+    "log_format_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::check
+    absl::log
+    absl::log_internal_test_matchers
+    absl::optional
+    absl::scoped_mock_log
+    absl::str_format
+    absl::strings
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_macro_hygiene_test
+  SRCS
+    "log_macro_hygiene_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_severity
+    absl::scoped_mock_log
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_sink_test
+  SRCS
+    "log_sink_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_internal_test_actions
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::log_sink
+    absl::log_sink_registry
+    absl::log_severity
+    absl::scoped_mock_log
+    absl::strings
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_streamer_test
+  SRCS
+    "log_streamer_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::core_headers
+    absl::log
+    absl::log_internal_test_actions
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::log_streamer
+    absl::log_severity
+    absl::scoped_mock_log
+    absl::strings
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_modifier_methods_test
+  SRCS
+    "log_modifier_methods_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log
+    absl::log_internal_test_actions
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::log_sink
+    absl::scoped_mock_log
+    absl::strings
+    absl::time
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    scoped_mock_log_test
+  SRCS
+    "scoped_mock_log_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_globals
+    absl::log_internal_globals
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::log_severity
+    absl::memory
+    absl::scoped_mock_log
+    absl::strings
+    absl::synchronization
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_internal_stderr_log_sink_test
+  SRCS
+    "internal/stderr_log_sink_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_globals
+    absl::log_internal_test_helpers
+    absl::log_severity
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_stripping_test
+  SRCS
+    "stripping_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::check
+    absl::flags_program_name
+    absl::log
+    absl::log_internal_test_helpers
+    absl::log_severity
+    absl::strerror
+    absl::strings
+    absl::str_format
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    log_structured_test
+  SRCS
+    "structured_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::core_headers
+    absl::log
+    absl::log_internal_test_helpers
+    absl::log_internal_test_matchers
+    absl::log_structured
+    absl::scoped_mock_log
+    GTest::gmock
+    GTest::gtest_main
+)
diff --git a/abseil-cpp/absl/log/absl_check.h b/abseil-cpp/absl/log/absl_check.h
new file mode 100644
index 0000000..1bb43bd
--- /dev/null
+++ b/abseil-cpp/absl/log/absl_check.h
@@ -0,0 +1,117 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/absl_check.h
+// -----------------------------------------------------------------------------
+//
+// This header declares a family of `ABSL_CHECK` macros as alternative spellings
+// for `CHECK` macros in `check.h`.
+//
+// Except for those whose names begin with `ABSL_DCHECK`, these macros are not
+// controlled by `NDEBUG` (cf. `assert`), so the check will be executed
+// regardless of compilation mode. `ABSL_CHECK` and friends are thus useful for
+// confirming invariants in situations where continuing to run would be worse
+// than terminating, e.g., due to risk of data corruption or security
+// compromise.  It is also more robust and portable to deliberately terminate
+// at a particular place with a useful message and backtrace than to assume some
+// ultimately unspecified and unreliable crashing behavior (such as a
+// "segmentation fault").
+//
+// For full documentation of each macro, see comments in `check.h`, which has an
+// identical set of macros without the ABSL_* prefix.
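+//
+// Illustrative usage (a minimal sketch; `ptr`, `bytes_read`, and `queue` are
+// hypothetical names, not part of this header):
+//
+//   ABSL_CHECK(ptr != nullptr) << "unexpected null pointer";
+//   ABSL_CHECK_GT(bytes_read, 0) << "short read of " << bytes_read << " bytes";
+//   ABSL_DCHECK_EQ(queue.size(), 1u);  // compiled out when `NDEBUG` is defined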
+
+#ifndef ABSL_LOG_ABSL_CHECK_H_
+#define ABSL_LOG_ABSL_CHECK_H_
+
+#include "absl/log/internal/check_impl.h"
+
+#define ABSL_CHECK(condition) \
+  ABSL_LOG_INTERNAL_CHECK_IMPL((condition), #condition)
+#define ABSL_QCHECK(condition) \
+  ABSL_LOG_INTERNAL_QCHECK_IMPL((condition), #condition)
+#define ABSL_PCHECK(condition) \
+  ABSL_LOG_INTERNAL_PCHECK_IMPL((condition), #condition)
+#define ABSL_DCHECK(condition) \
+  ABSL_LOG_INTERNAL_DCHECK_IMPL((condition), #condition)
+
+#define ABSL_CHECK_EQ(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_CHECK_NE(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_CHECK_LE(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_CHECK_LT(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_CHECK_GE(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_CHECK_GT(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_QCHECK_EQ(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_QCHECK_NE(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_QCHECK_LE(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_QCHECK_LT(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_QCHECK_GE(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_QCHECK_GT(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_DCHECK_EQ(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_DCHECK_NE(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_DCHECK_LE(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_DCHECK_LT(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_DCHECK_GE(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define ABSL_DCHECK_GT(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+
+#define ABSL_CHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK_IMPL((status), #status)
+#define ABSL_QCHECK_OK(status) \
+  ABSL_LOG_INTERNAL_QCHECK_OK_IMPL((status), #status)
+#define ABSL_DCHECK_OK(status) \
+  ABSL_LOG_INTERNAL_DCHECK_OK_IMPL((status), #status)
+
+#define ABSL_CHECK_STREQ(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_CHECK_STRNE(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_CHECK_STRCASEEQ(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_CHECK_STRCASENE(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_QCHECK_STREQ(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_QCHECK_STRNE(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_QCHECK_STRCASEEQ(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_QCHECK_STRCASENE(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_DCHECK_STREQ(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_DCHECK_STRNE(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_DCHECK_STRCASEEQ(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_DCHECK_STRCASENE(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+
+#endif  // ABSL_LOG_ABSL_CHECK_H_
diff --git a/abseil-cpp/absl/log/absl_check_test.cc b/abseil-cpp/absl/log/absl_check_test.cc
new file mode 100644
index 0000000..d84940f
--- /dev/null
+++ b/abseil-cpp/absl/log/absl_check_test.cc
@@ -0,0 +1,58 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/absl_check.h"
+
+#define ABSL_TEST_CHECK ABSL_CHECK
+#define ABSL_TEST_CHECK_OK ABSL_CHECK_OK
+#define ABSL_TEST_CHECK_EQ ABSL_CHECK_EQ
+#define ABSL_TEST_CHECK_NE ABSL_CHECK_NE
+#define ABSL_TEST_CHECK_GE ABSL_CHECK_GE
+#define ABSL_TEST_CHECK_LE ABSL_CHECK_LE
+#define ABSL_TEST_CHECK_GT ABSL_CHECK_GT
+#define ABSL_TEST_CHECK_LT ABSL_CHECK_LT
+#define ABSL_TEST_CHECK_STREQ ABSL_CHECK_STREQ
+#define ABSL_TEST_CHECK_STRNE ABSL_CHECK_STRNE
+#define ABSL_TEST_CHECK_STRCASEEQ ABSL_CHECK_STRCASEEQ
+#define ABSL_TEST_CHECK_STRCASENE ABSL_CHECK_STRCASENE
+
+#define ABSL_TEST_DCHECK ABSL_DCHECK
+#define ABSL_TEST_DCHECK_OK ABSL_DCHECK_OK
+#define ABSL_TEST_DCHECK_EQ ABSL_DCHECK_EQ
+#define ABSL_TEST_DCHECK_NE ABSL_DCHECK_NE
+#define ABSL_TEST_DCHECK_GE ABSL_DCHECK_GE
+#define ABSL_TEST_DCHECK_LE ABSL_DCHECK_LE
+#define ABSL_TEST_DCHECK_GT ABSL_DCHECK_GT
+#define ABSL_TEST_DCHECK_LT ABSL_DCHECK_LT
+#define ABSL_TEST_DCHECK_STREQ ABSL_DCHECK_STREQ
+#define ABSL_TEST_DCHECK_STRNE ABSL_DCHECK_STRNE
+#define ABSL_TEST_DCHECK_STRCASEEQ ABSL_DCHECK_STRCASEEQ
+#define ABSL_TEST_DCHECK_STRCASENE ABSL_DCHECK_STRCASENE
+
+#define ABSL_TEST_QCHECK ABSL_QCHECK
+#define ABSL_TEST_QCHECK_OK ABSL_QCHECK_OK
+#define ABSL_TEST_QCHECK_EQ ABSL_QCHECK_EQ
+#define ABSL_TEST_QCHECK_NE ABSL_QCHECK_NE
+#define ABSL_TEST_QCHECK_GE ABSL_QCHECK_GE
+#define ABSL_TEST_QCHECK_LE ABSL_QCHECK_LE
+#define ABSL_TEST_QCHECK_GT ABSL_QCHECK_GT
+#define ABSL_TEST_QCHECK_LT ABSL_QCHECK_LT
+#define ABSL_TEST_QCHECK_STREQ ABSL_QCHECK_STREQ
+#define ABSL_TEST_QCHECK_STRNE ABSL_QCHECK_STRNE
+#define ABSL_TEST_QCHECK_STRCASEEQ ABSL_QCHECK_STRCASEEQ
+#define ABSL_TEST_QCHECK_STRCASENE ABSL_QCHECK_STRCASENE
+
+#include "gtest/gtest.h"
+#include "absl/log/check_test_impl.inc"
diff --git a/abseil-cpp/absl/log/absl_log.h b/abseil-cpp/absl/log/absl_log.h
new file mode 100644
index 0000000..0517760
--- /dev/null
+++ b/abseil-cpp/absl/log/absl_log.h
@@ -0,0 +1,103 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/absl_log.h
+// -----------------------------------------------------------------------------
+//
+// This header declares a family of `ABSL_LOG` macros as alternative spellings
+// for macros in `log.h`.
+//
+// Basic invocation looks like this:
+//
+//   ABSL_LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// Most `ABSL_LOG` macros take a severity level argument. The severity levels
+// are `INFO`, `WARNING`, `ERROR`, and `FATAL`.
+//
+// For full documentation, see comments in `log.h`, which includes full
+// reference documentation on use of the equivalent `LOG` macro and has an
+// identical set of macros without the ABSL_* prefix.
+
+#ifndef ABSL_LOG_ABSL_LOG_H_
+#define ABSL_LOG_ABSL_LOG_H_
+
+#include "absl/log/internal/log_impl.h"
+
+#define ABSL_LOG(severity) ABSL_LOG_INTERNAL_LOG_IMPL(_##severity)
+#define ABSL_PLOG(severity) ABSL_LOG_INTERNAL_PLOG_IMPL(_##severity)
+#define ABSL_DLOG(severity) ABSL_LOG_INTERNAL_DLOG_IMPL(_##severity)
+
+#define ABSL_LOG_IF(severity, condition) \
+  ABSL_LOG_INTERNAL_LOG_IF_IMPL(_##severity, condition)
+#define ABSL_PLOG_IF(severity, condition) \
+  ABSL_LOG_INTERNAL_PLOG_IF_IMPL(_##severity, condition)
+#define ABSL_DLOG_IF(severity, condition) \
+  ABSL_LOG_INTERNAL_DLOG_IF_IMPL(_##severity, condition)
+
+#define ABSL_LOG_EVERY_N(severity, n) \
+  ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(_##severity, n)
+#define ABSL_LOG_FIRST_N(severity, n) \
+  ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(_##severity, n)
+#define ABSL_LOG_EVERY_POW_2(severity) \
+  ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(_##severity)
+#define ABSL_LOG_EVERY_N_SEC(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+
+#define ABSL_PLOG_EVERY_N(severity, n) \
+  ABSL_LOG_INTERNAL_PLOG_EVERY_N_IMPL(_##severity, n)
+#define ABSL_PLOG_FIRST_N(severity, n) \
+  ABSL_LOG_INTERNAL_PLOG_FIRST_N_IMPL(_##severity, n)
+#define ABSL_PLOG_EVERY_POW_2(severity) \
+  ABSL_LOG_INTERNAL_PLOG_EVERY_POW_2_IMPL(_##severity)
+#define ABSL_PLOG_EVERY_N_SEC(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_PLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+
+#define ABSL_DLOG_EVERY_N(severity, n) \
+  ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(_##severity, n)
+#define ABSL_DLOG_FIRST_N(severity, n) \
+  ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(_##severity, n)
+#define ABSL_DLOG_EVERY_POW_2(severity) \
+  ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(_##severity)
+#define ABSL_DLOG_EVERY_N_SEC(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+
+#define ABSL_LOG_IF_EVERY_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+#define ABSL_LOG_IF_FIRST_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_LOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+#define ABSL_LOG_IF_EVERY_POW_2(severity, condition) \
+  ABSL_LOG_INTERNAL_LOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+#define ABSL_LOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
+  ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+
+#define ABSL_PLOG_IF_EVERY_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+#define ABSL_PLOG_IF_FIRST_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_PLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+#define ABSL_PLOG_IF_EVERY_POW_2(severity, condition) \
+  ABSL_LOG_INTERNAL_PLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+#define ABSL_PLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
+  ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+
+#define ABSL_DLOG_IF_EVERY_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+#define ABSL_DLOG_IF_FIRST_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+#define ABSL_DLOG_IF_EVERY_POW_2(severity, condition) \
+  ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+#define ABSL_DLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
+  ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+
+#endif  // ABSL_LOG_ABSL_LOG_H_
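A short usage sketch for the ABSL_LOG family declared above; illustrative only and not part of the patch (assumes logging has already been initialized elsewhere, e.g. via absl::InitializeLog()):

#include "absl/log/absl_log.h"

void ProcessBatch(int batch_size) {
  ABSL_LOG(INFO) << "Processing batch of " << batch_size << " items";
  ABSL_LOG_IF(WARNING, batch_size == 0) << "Empty batch";
  for (int i = 0; i < batch_size; ++i) {
    // Logs the 1st, 101st, 201st, ... iteration only.
    ABSL_LOG_EVERY_N(INFO, 100) << "Processed item " << i;
  }
  ABSL_DLOG(INFO) << "Debug-build-only details";
}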
diff --git a/abseil-cpp/absl/debugging/leak_check_disable.cc b/abseil-cpp/absl/log/absl_log_basic_test.cc
similarity index 67%
copy from abseil-cpp/absl/debugging/leak_check_disable.cc
copy to abseil-cpp/absl/log/absl_log_basic_test.cc
index 924d6e3..3a4b83c 100644
--- a/abseil-cpp/absl/debugging/leak_check_disable.cc
+++ b/abseil-cpp/absl/log/absl_log_basic_test.cc
@@ -1,4 +1,5 @@
-// Copyright 2017 The Abseil Authors.
+//
+// Copyright 2022 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,9 +13,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Disable LeakSanitizer when this file is linked in.
-// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
-extern "C" int __lsan_is_turned_off();
-extern "C" int __lsan_is_turned_off() {
-  return 1;
-}
+#include "absl/log/absl_log.h"
+
+#define ABSL_TEST_LOG ABSL_LOG
+
+#include "gtest/gtest.h"
+#include "absl/log/log_basic_test_impl.inc"
diff --git a/abseil-cpp/absl/log/check.h b/abseil-cpp/absl/log/check.h
new file mode 100644
index 0000000..0a2f2e4
--- /dev/null
+++ b/abseil-cpp/absl/log/check.h
@@ -0,0 +1,209 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/check.h
+// -----------------------------------------------------------------------------
+//
+// This header declares a family of `CHECK` macros.
+//
+// `CHECK` macros terminate the program with a fatal error if the specified
+// condition is not true.
+//
+// Except for those whose names begin with `DCHECK`, these macros are not
+// controlled by `NDEBUG` (cf. `assert`), so the check will be executed
+// regardless of compilation mode. `CHECK` and friends are thus useful for
+// confirming invariants in situations where continuing to run would be worse
+// than terminating, e.g., due to risk of data corruption or security
+// compromise.  It is also more robust and portable to deliberately terminate
+// at a particular place with a useful message and backtrace than to assume some
+// ultimately unspecified and unreliable crashing behavior (such as a
+// "segmentation fault").
+
+#ifndef ABSL_LOG_CHECK_H_
+#define ABSL_LOG_CHECK_H_
+
+#include "absl/log/internal/check_impl.h"
+#include "absl/log/internal/check_op.h"     // IWYU pragma: export
+#include "absl/log/internal/conditions.h"   // IWYU pragma: export
+#include "absl/log/internal/log_message.h"  // IWYU pragma: export
+#include "absl/log/internal/strip.h"        // IWYU pragma: export
+
+// CHECK()
+//
+// `CHECK` terminates the program with a fatal error if `condition` is not true.
+//
+// The message may include additional information such as stack traces, when
+// available.
+//
+// Example:
+//
+//   CHECK(!cheese.empty()) << "Out of Cheese";
+//
+// Might produce a message like:
+//
+//   Check failed: !cheese.empty() Out of Cheese
+#define CHECK(condition) ABSL_LOG_INTERNAL_CHECK_IMPL((condition), #condition)
+
+// QCHECK()
+//
+// `QCHECK` behaves like `CHECK` but does not print a full stack trace and does
+// not run registered error handlers (as `QFATAL`).  It is useful when the
+// problem is definitely unrelated to program flow, e.g. when validating user
+// input.
+#define QCHECK(condition) ABSL_LOG_INTERNAL_QCHECK_IMPL((condition), #condition)
+
+// PCHECK()
+//
+// `PCHECK` behaves like `CHECK` but appends a description of the current state
+// of `errno` to the failure message.
+//
+// Example:
+//
+//   int fd = open("/var/empty/missing", O_RDONLY);
+//   PCHECK(fd != -1) << "posix is difficult";
+//
+// Might produce a message like:
+//
+//   Check failed: fd != -1 posix is difficult: No such file or directory [2]
+#define PCHECK(condition) ABSL_LOG_INTERNAL_PCHECK_IMPL((condition), #condition)
+
+// DCHECK()
+//
+// `DCHECK` behaves like `CHECK` in debug mode and does nothing otherwise (as
+// `DLOG`).  Unlike with `CHECK` (but as with `assert`), it is not safe to rely
+// on evaluation of `condition`: when `NDEBUG` is enabled, DCHECK does not
+// evaluate the condition.
+#define DCHECK(condition) ABSL_LOG_INTERNAL_DCHECK_IMPL((condition), #condition)
+
+// `CHECK_EQ` and friends are syntactic sugar for `CHECK(x == y)` that
+// automatically output the expression being tested and the evaluated values on
+// either side.
+//
+// Example:
+//
+//   int x = 3, y = 5;
+//   CHECK_EQ(2 * x, y) << "oops!";
+//
+// Might produce a message like:
+//
+//   Check failed: 2 * x == y (6 vs. 5) oops!
+//
+// The values must implement the appropriate comparison operator as well as
+// `operator<<(std::ostream&, ...)`.  Care is taken to ensure that each
+// argument is evaluated exactly once, and that anything which is legal to pass
+// as a function argument is legal here.  In particular, the arguments may be
+// temporary expressions which will end up being destroyed at the end of the
+// statement.
+//
+// Example:
+//
+//   CHECK_EQ(std::string("abc")[1], 'b');
+//
+// WARNING: Passing `NULL` as an argument to `CHECK_EQ` and similar macros does
+// not compile.  Use `nullptr` instead.
+#define CHECK_EQ(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_NE(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_LE(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_LT(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_GE(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_GT(val1, val2) \
+  ABSL_LOG_INTERNAL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_EQ(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_NE(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_LE(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_LT(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_GE(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_GT(val1, val2) \
+  ABSL_LOG_INTERNAL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_EQ(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_NE(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_LE(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_LT(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_GE(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_GT(val1, val2) \
+  ABSL_LOG_INTERNAL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+
+// `CHECK_OK` and friends validate that the provided `absl::Status` or
+// `absl::StatusOr<T>` is OK.  If it isn't, they print a failure message that
+// includes the actual status and terminate the program.
+//
+// As with all `DCHECK` variants, `DCHECK_OK` has no effect (not even
+// evaluating its argument) if `NDEBUG` is enabled.
+//
+// Example:
+//
+//   CHECK_OK(FunctionReturnsStatus(x, y, z)) << "oops!";
+//
+// Might produce a message like:
+//
+//   Check failed: FunctionReturnsStatus(x, y, z) is OK (ABORTED: timeout) oops!
+#define CHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK_IMPL((status), #status)
+#define QCHECK_OK(status) ABSL_LOG_INTERNAL_QCHECK_OK_IMPL((status), #status)
+#define DCHECK_OK(status) ABSL_LOG_INTERNAL_DCHECK_OK_IMPL((status), #status)
+
+// `CHECK_STREQ` and friends provide `CHECK_EQ` functionality for C strings,
+// i.e., nul-terminated char arrays.  The `CASE` versions are case-insensitive.
+//
+// Example:
+//
+//   CHECK_STREQ(argv[0], "./skynet");
+//
+// Note that both arguments may be temporary strings which are destroyed by the
+// compiler at the end of the current full expression.
+//
+// Example:
+//
+//   CHECK_STREQ(Foo().c_str(), Bar().c_str());
+#define CHECK_STREQ(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STRNE(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STRCASEEQ(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STRCASENE(s1, s2) \
+  ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define QCHECK_STREQ(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define QCHECK_STRNE(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define QCHECK_STRCASEEQ(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define QCHECK_STRCASENE(s1, s2) \
+  ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define DCHECK_STREQ(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define DCHECK_STRNE(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define DCHECK_STRCASEEQ(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define DCHECK_STRCASENE(s1, s2) \
+  ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+
+#endif  // ABSL_LOG_CHECK_H_
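A brief sketch of the unprefixed CHECK macros in application code; illustrative only and not part of the patch (Validate() and its arguments are made up):

#include <string>

#include "absl/log/check.h"
#include "absl/status/status.h"

absl::Status Validate(const std::string& name, int count) {
  CHECK(!name.empty()) << "name is required";
  CHECK_GT(count, 0) << "count must be positive, got " << count;
  CHECK_STRNE(name.c_str(), "forbidden");
  return absl::OkStatus();
}

void Run() {
  CHECK_OK(Validate("widget", 3)) << "validation failed";
  DCHECK_EQ(2 + 2, 4);  // no-op (and unevaluated) when NDEBUG is defined
}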
diff --git a/abseil-cpp/absl/log/check_test.cc b/abseil-cpp/absl/log/check_test.cc
new file mode 100644
index 0000000..ef415bd
--- /dev/null
+++ b/abseil-cpp/absl/log/check_test.cc
@@ -0,0 +1,58 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/check.h"
+
+#define ABSL_TEST_CHECK CHECK
+#define ABSL_TEST_CHECK_OK CHECK_OK
+#define ABSL_TEST_CHECK_EQ CHECK_EQ
+#define ABSL_TEST_CHECK_NE CHECK_NE
+#define ABSL_TEST_CHECK_GE CHECK_GE
+#define ABSL_TEST_CHECK_LE CHECK_LE
+#define ABSL_TEST_CHECK_GT CHECK_GT
+#define ABSL_TEST_CHECK_LT CHECK_LT
+#define ABSL_TEST_CHECK_STREQ CHECK_STREQ
+#define ABSL_TEST_CHECK_STRNE CHECK_STRNE
+#define ABSL_TEST_CHECK_STRCASEEQ CHECK_STRCASEEQ
+#define ABSL_TEST_CHECK_STRCASENE CHECK_STRCASENE
+
+#define ABSL_TEST_DCHECK DCHECK
+#define ABSL_TEST_DCHECK_OK DCHECK_OK
+#define ABSL_TEST_DCHECK_EQ DCHECK_EQ
+#define ABSL_TEST_DCHECK_NE DCHECK_NE
+#define ABSL_TEST_DCHECK_GE DCHECK_GE
+#define ABSL_TEST_DCHECK_LE DCHECK_LE
+#define ABSL_TEST_DCHECK_GT DCHECK_GT
+#define ABSL_TEST_DCHECK_LT DCHECK_LT
+#define ABSL_TEST_DCHECK_STREQ DCHECK_STREQ
+#define ABSL_TEST_DCHECK_STRNE DCHECK_STRNE
+#define ABSL_TEST_DCHECK_STRCASEEQ DCHECK_STRCASEEQ
+#define ABSL_TEST_DCHECK_STRCASENE DCHECK_STRCASENE
+
+#define ABSL_TEST_QCHECK QCHECK
+#define ABSL_TEST_QCHECK_OK QCHECK_OK
+#define ABSL_TEST_QCHECK_EQ QCHECK_EQ
+#define ABSL_TEST_QCHECK_NE QCHECK_NE
+#define ABSL_TEST_QCHECK_GE QCHECK_GE
+#define ABSL_TEST_QCHECK_LE QCHECK_LE
+#define ABSL_TEST_QCHECK_GT QCHECK_GT
+#define ABSL_TEST_QCHECK_LT QCHECK_LT
+#define ABSL_TEST_QCHECK_STREQ QCHECK_STREQ
+#define ABSL_TEST_QCHECK_STRNE QCHECK_STRNE
+#define ABSL_TEST_QCHECK_STRCASEEQ QCHECK_STRCASEEQ
+#define ABSL_TEST_QCHECK_STRCASENE QCHECK_STRCASENE
+
+#include "gtest/gtest.h"
+#include "absl/log/check_test_impl.inc"
diff --git a/abseil-cpp/absl/log/check_test_impl.inc b/abseil-cpp/absl/log/check_test_impl.inc
new file mode 100644
index 0000000..d5c0aee
--- /dev/null
+++ b/abseil-cpp/absl/log/check_test_impl.inc
@@ -0,0 +1,528 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_LOG_CHECK_TEST_IMPL_H_
+#define ABSL_LOG_CHECK_TEST_IMPL_H_
+
+// Verify that both sets of macros behave identically by parameterizing the
+// entire test file.
+#ifndef ABSL_TEST_CHECK
+#error ABSL_TEST_CHECK must be defined for these tests to work.
+#endif
+
+#include <ostream>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/status/status.h"
+
+// NOLINTBEGIN(misc-definitions-in-headers)
+
+namespace absl_log_internal {
+
+using ::testing::AllOf;
+using ::testing::HasSubstr;
+using ::testing::Not;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+#if GTEST_HAS_DEATH_TEST
+
+TEST(CHECKDeathTest, TestBasicValues) {
+  ABSL_TEST_CHECK(true);
+
+  EXPECT_DEATH(ABSL_TEST_CHECK(false), "Check failed: false");
+
+  int i = 2;
+  ABSL_TEST_CHECK(i != 3);  // NOLINT
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+TEST(CHECKTest, TestLogicExpressions) {
+  int i = 5;
+  ABSL_TEST_CHECK(i > 0 && i < 10);
+  ABSL_TEST_CHECK(i < 0 || i > 3);
+}
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+ABSL_CONST_INIT const auto global_var_check = [](int i) {
+  ABSL_TEST_CHECK(i > 0);  // NOLINT
+  return i + 1;
+}(3);
+
+ABSL_CONST_INIT const auto global_var = [](int i) {
+  ABSL_TEST_CHECK_GE(i, 0);  // NOLINT
+  return i + 1;
+}(global_var_check);
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG
+
+TEST(CHECKTest, TestPlacementsInCompoundStatements) {
+  // check placement inside if/else clauses
+  if (true) ABSL_TEST_CHECK(true);
+
+  if (false)
+    ;  // NOLINT
+  else
+    ABSL_TEST_CHECK(true);
+
+  switch (0)
+  case 0:
+    ABSL_TEST_CHECK(true);  // NOLINT
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+  constexpr auto var = [](int i) {
+    ABSL_TEST_CHECK(i > 0);  // NOLINT
+    return i + 1;
+  }(global_var);
+  (void)var;
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG
+}
+
+TEST(CHECKTest, TestBoolConvertible) {
+  struct Tester {
+  } tester;
+  ABSL_TEST_CHECK([&]() { return &tester; }());
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+TEST(CHECKDeathTest, TestChecksWithSideEffects) {
+  int var = 0;
+  ABSL_TEST_CHECK([&var]() {
+    ++var;
+    return true;
+  }());
+  EXPECT_EQ(var, 1);
+
+  EXPECT_DEATH(ABSL_TEST_CHECK([&var]() {
+                 ++var;
+                 return false;
+               }()) << var,
+               "Check failed: .* 2");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+template <int a, int b>
+constexpr int sum() {
+  return a + b;
+}
+#define MACRO_ONE 1
+#define TEMPLATE_SUM(a, b) sum<a, b>()
+#define CONCAT(a, b) a b
+#define IDENTITY(x) x
+
+TEST(CHECKTest, TestPassingMacroExpansion) {
+  ABSL_TEST_CHECK(IDENTITY(true));
+  ABSL_TEST_CHECK_EQ(TEMPLATE_SUM(MACRO_ONE, 2), 3);
+  ABSL_TEST_CHECK_STREQ(CONCAT("x", "y"), "xy");
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+TEST(CHECKTest, TestMacroExpansionInMessage) {
+  auto MessageGen = []() { ABSL_TEST_CHECK(IDENTITY(false)); };
+  EXPECT_DEATH(MessageGen(), HasSubstr("IDENTITY(false)"));
+}
+
+TEST(CHECKTest, TestNestedMacroExpansionInMessage) {
+  EXPECT_DEATH(ABSL_TEST_CHECK(IDENTITY(false)), HasSubstr("IDENTITY(false)"));
+}
+
+TEST(CHECKTest, TestMacroExpansionCompare) {
+  EXPECT_DEATH(ABSL_TEST_CHECK_EQ(IDENTITY(false), IDENTITY(true)),
+               HasSubstr("IDENTITY(false) == IDENTITY(true)"));
+  EXPECT_DEATH(ABSL_TEST_CHECK_GT(IDENTITY(1), IDENTITY(2)),
+               HasSubstr("IDENTITY(1) > IDENTITY(2)"));
+}
+
+TEST(CHECKTest, TestMacroExpansionStrCompare) {
+  EXPECT_DEATH(ABSL_TEST_CHECK_STREQ(IDENTITY("x"), IDENTITY("y")),
+               HasSubstr("IDENTITY(\"x\") == IDENTITY(\"y\")"));
+  EXPECT_DEATH(ABSL_TEST_CHECK_STRCASENE(IDENTITY("a"), IDENTITY("A")),
+               HasSubstr("IDENTITY(\"a\") != IDENTITY(\"A\")"));
+}
+
+TEST(CHECKTest, TestMacroExpansionStatus) {
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_OK(IDENTITY(absl::FailedPreconditionError("message"))),
+      HasSubstr("IDENTITY(absl::FailedPreconditionError(\"message\"))"));
+}
+
+TEST(CHECKTest, TestMacroExpansionComma) {
+  EXPECT_DEATH(ABSL_TEST_CHECK(TEMPLATE_SUM(MACRO_ONE, 2) == 4),
+               HasSubstr("TEMPLATE_SUM(MACRO_ONE, 2) == 4"));
+}
+
+TEST(CHECKTest, TestMacroExpansionCommaCompare) {
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_EQ(TEMPLATE_SUM(2, MACRO_ONE), TEMPLATE_SUM(3, 2)),
+      HasSubstr("TEMPLATE_SUM(2, MACRO_ONE) == TEMPLATE_SUM(3, 2)"));
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_GT(TEMPLATE_SUM(2, MACRO_ONE), TEMPLATE_SUM(3, 2)),
+      HasSubstr("TEMPLATE_SUM(2, MACRO_ONE) > TEMPLATE_SUM(3, 2)"));
+}
+
+TEST(CHECKTest, TestMacroExpansionCommaStrCompare) {
+  EXPECT_DEATH(ABSL_TEST_CHECK_STREQ(CONCAT("x", "y"), "z"),
+               HasSubstr("CONCAT(\"x\", \"y\") == \"z\""));
+  EXPECT_DEATH(ABSL_TEST_CHECK_STRNE(CONCAT("x", "y"), "xy"),
+               HasSubstr("CONCAT(\"x\", \"y\") != \"xy\""));
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+#undef TEMPLATE_SUM
+#undef CONCAT
+#undef MACRO_ONE
+#undef IDENTITY
+
+#if GTEST_HAS_DEATH_TEST
+
+TEST(CHECKDeathTest, TestOrderOfInvocationsBetweenCheckAndMessage) {
+  int counter = 0;
+
+  auto GetStr = [&counter]() -> std::string {
+    return counter++ == 0 ? "" : "non-empty";
+  };
+
+  EXPECT_DEATH(ABSL_TEST_CHECK(!GetStr().empty()) << GetStr(),
+               HasSubstr("non-empty"));
+}
+
+TEST(CHECKTest, TestSecondaryFailure) {
+  auto FailingRoutine = []() {
+    ABSL_TEST_CHECK(false) << "Secondary";
+    return false;
+  };
+  EXPECT_DEATH(ABSL_TEST_CHECK(FailingRoutine()) << "Primary",
+               AllOf(HasSubstr("Secondary"), Not(HasSubstr("Primary"))));
+}
+
+TEST(CHECKTest, TestSecondaryFailureInMessage) {
+  auto MessageGen = []() {
+    ABSL_TEST_CHECK(false) << "Secondary";
+    return "Primary";
+  };
+  EXPECT_DEATH(ABSL_TEST_CHECK(false) << MessageGen(),
+               AllOf(HasSubstr("Secondary"), Not(HasSubstr("Primary"))));
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+TEST(CHECKTest, TestBinaryChecksWithPrimitives) {
+  ABSL_TEST_CHECK_EQ(1, 1);
+  ABSL_TEST_CHECK_NE(1, 2);
+  ABSL_TEST_CHECK_GE(1, 1);
+  ABSL_TEST_CHECK_GE(2, 1);
+  ABSL_TEST_CHECK_LE(1, 1);
+  ABSL_TEST_CHECK_LE(1, 2);
+  ABSL_TEST_CHECK_GT(2, 1);
+  ABSL_TEST_CHECK_LT(1, 2);
+}
+
+// For testing using CHECK*() on anonymous enums.
+enum { CASE_A, CASE_B };
+
+TEST(CHECKTest, TestBinaryChecksWithEnumValues) {
+  // Tests using CHECK*() on anonymous enums.
+  ABSL_TEST_CHECK_EQ(CASE_A, CASE_A);
+  ABSL_TEST_CHECK_NE(CASE_A, CASE_B);
+  ABSL_TEST_CHECK_GE(CASE_A, CASE_A);
+  ABSL_TEST_CHECK_GE(CASE_B, CASE_A);
+  ABSL_TEST_CHECK_LE(CASE_A, CASE_A);
+  ABSL_TEST_CHECK_LE(CASE_A, CASE_B);
+  ABSL_TEST_CHECK_GT(CASE_B, CASE_A);
+  ABSL_TEST_CHECK_LT(CASE_A, CASE_B);
+}
+
+TEST(CHECKTest, TestBinaryChecksWithNullptr) {
+  const void* p_null = nullptr;
+  const void* p_not_null = &p_null;
+  ABSL_TEST_CHECK_EQ(p_null, nullptr);
+  ABSL_TEST_CHECK_EQ(nullptr, p_null);
+  ABSL_TEST_CHECK_NE(p_not_null, nullptr);
+  ABSL_TEST_CHECK_NE(nullptr, p_not_null);
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+// Test logging of various char-typed values by failing CHECK*().
+TEST(CHECKDeathTest, TestComparingCharsValues) {
+  {
+    char a = ';';
+    char b = 'b';
+    EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+                 "Check failed: a == b \\(';' vs. 'b'\\)");
+    b = 1;
+    EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+                 "Check failed: a == b \\(';' vs. char value 1\\)");
+  }
+  {
+    signed char a = ';';
+    signed char b = 'b';
+    EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+                 "Check failed: a == b \\(';' vs. 'b'\\)");
+    b = -128;
+    EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+                 "Check failed: a == b \\(';' vs. signed char value -128\\)");
+  }
+  {
+    unsigned char a = ';';
+    unsigned char b = 'b';
+    EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+                 "Check failed: a == b \\(';' vs. 'b'\\)");
+    b = 128;
+    EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+                 "Check failed: a == b \\(';' vs. unsigned char value 128\\)");
+  }
+}
+
+TEST(CHECKDeathTest, TestNullValuesAreReportedCleanly) {
+  const char* a = nullptr;
+  const char* b = nullptr;
+  EXPECT_DEATH(ABSL_TEST_CHECK_NE(a, b),
+               "Check failed: a != b \\(\\(null\\) vs. \\(null\\)\\)");
+
+  a = "xx";
+  EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b),
+               "Check failed: a == b \\(xx vs. \\(null\\)\\)");
+  EXPECT_DEATH(ABSL_TEST_CHECK_EQ(b, a),
+               "Check failed: b == a \\(\\(null\\) vs. xx\\)");
+
+  std::nullptr_t n{};
+  EXPECT_DEATH(ABSL_TEST_CHECK_NE(n, nullptr),
+               "Check failed: n != nullptr \\(\\(null\\) vs. \\(null\\)\\)");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+TEST(CHECKTest, TestSTREQ) {
+  ABSL_TEST_CHECK_STREQ("this", "this");
+  ABSL_TEST_CHECK_STREQ(nullptr, nullptr);
+  ABSL_TEST_CHECK_STRCASEEQ("this", "tHiS");
+  ABSL_TEST_CHECK_STRCASEEQ(nullptr, nullptr);
+  ABSL_TEST_CHECK_STRNE("this", "tHiS");
+  ABSL_TEST_CHECK_STRNE("this", nullptr);
+  ABSL_TEST_CHECK_STRCASENE("this", "that");
+  ABSL_TEST_CHECK_STRCASENE(nullptr, "that");
+  ABSL_TEST_CHECK_STREQ((std::string("a") + "b").c_str(), "ab");
+  ABSL_TEST_CHECK_STREQ(std::string("test").c_str(),
+                        (std::string("te") + std::string("st")).c_str());
+}
+
+TEST(CHECKTest, TestComparisonPlacementsInCompoundStatements) {
+  // check placement inside if/else clauses
+  if (true) ABSL_TEST_CHECK_EQ(1, 1);
+  if (true) ABSL_TEST_CHECK_STREQ("c", "c");
+
+  if (false)
+    ;  // NOLINT
+  else
+    ABSL_TEST_CHECK_LE(0, 1);
+
+  if (false)
+    ;  // NOLINT
+  else
+    ABSL_TEST_CHECK_STRNE("a", "b");
+
+  switch (0)
+  case 0:
+    ABSL_TEST_CHECK_NE(1, 0);
+
+  switch (0)
+  case 0:
+    ABSL_TEST_CHECK_STRCASEEQ("A", "a");
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+  constexpr auto var = [](int i) {
+    ABSL_TEST_CHECK_GT(i, 0);
+    return i + 1;
+  }(global_var);
+  (void)var;
+
+  // CHECK_STR... checks are not supported in constexpr routines.
+  // constexpr auto var2 = [](int i) {
+  //  ABSL_TEST_CHECK_STRNE("c", "d");
+  //  return i + 1;
+  // }(global_var);
+
+#if defined(__GNUC__)
+  int var3 = (({ ABSL_TEST_CHECK_LE(1, 2); }), global_var < 10) ? 1 : 0;
+  (void)var3;
+
+  int var4 = (({ ABSL_TEST_CHECK_STREQ("a", "a"); }), global_var < 10) ? 1 : 0;
+  (void)var4;
+#endif  // __GNUC__
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG
+}
+
+TEST(CHECKTest, TestDCHECK) {
+#ifdef NDEBUG
+  ABSL_TEST_DCHECK(1 == 2) << " DCHECK's shouldn't be compiled in normal mode";
+#endif
+  ABSL_TEST_DCHECK(1 == 1);  // NOLINT(readability/check)
+  ABSL_TEST_DCHECK_EQ(1, 1);
+  ABSL_TEST_DCHECK_NE(1, 2);
+  ABSL_TEST_DCHECK_GE(1, 1);
+  ABSL_TEST_DCHECK_GE(2, 1);
+  ABSL_TEST_DCHECK_LE(1, 1);
+  ABSL_TEST_DCHECK_LE(1, 2);
+  ABSL_TEST_DCHECK_GT(2, 1);
+  ABSL_TEST_DCHECK_LT(1, 2);
+
+  // Test DCHECK on std::nullptr_t
+  const void* p_null = nullptr;
+  const void* p_not_null = &p_null;
+  ABSL_TEST_DCHECK_EQ(p_null, nullptr);
+  ABSL_TEST_DCHECK_EQ(nullptr, p_null);
+  ABSL_TEST_DCHECK_NE(p_not_null, nullptr);
+  ABSL_TEST_DCHECK_NE(nullptr, p_not_null);
+}
+
+TEST(CHECKTest, TestQCHECK) {
+  // Tests that QCHECK does the same as CHECK.
+  ABSL_TEST_QCHECK(1 == 1);  // NOLINT(readability/check)
+  ABSL_TEST_QCHECK_EQ(1, 1);
+  ABSL_TEST_QCHECK_NE(1, 2);
+  ABSL_TEST_QCHECK_GE(1, 1);
+  ABSL_TEST_QCHECK_GE(2, 1);
+  ABSL_TEST_QCHECK_LE(1, 1);
+  ABSL_TEST_QCHECK_LE(1, 2);
+  ABSL_TEST_QCHECK_GT(2, 1);
+  ABSL_TEST_QCHECK_LT(1, 2);
+
+  // Tests using QCHECK*() on anonymous enums.
+  ABSL_TEST_QCHECK_EQ(CASE_A, CASE_A);
+  ABSL_TEST_QCHECK_NE(CASE_A, CASE_B);
+  ABSL_TEST_QCHECK_GE(CASE_A, CASE_A);
+  ABSL_TEST_QCHECK_GE(CASE_B, CASE_A);
+  ABSL_TEST_QCHECK_LE(CASE_A, CASE_A);
+  ABSL_TEST_QCHECK_LE(CASE_A, CASE_B);
+  ABSL_TEST_QCHECK_GT(CASE_B, CASE_A);
+  ABSL_TEST_QCHECK_LT(CASE_A, CASE_B);
+}
+
+TEST(CHECKTest, TestQCHECKPlacementsInCompoundStatements) {
+  // check placement inside if/else clauses
+  if (true) ABSL_TEST_QCHECK(true);
+
+  if (false)
+    ;  // NOLINT
+  else
+    ABSL_TEST_QCHECK(true);
+
+  if (false)
+    ;  // NOLINT
+  else
+    ABSL_TEST_QCHECK(true);
+
+  switch (0)
+  case 0:
+    ABSL_TEST_QCHECK(true);
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+  constexpr auto var = [](int i) {
+    ABSL_TEST_QCHECK(i > 0);  // NOLINT
+    return i + 1;
+  }(global_var);
+  (void)var;
+
+#if defined(__GNUC__)
+  int var2 = (({ ABSL_TEST_CHECK_LE(1, 2); }), global_var < 10) ? 1 : 0;
+  (void)var2;
+#endif  // __GNUC__
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG
+}
+
+class ComparableType {
+ public:
+  explicit ComparableType(int v) : v_(v) {}
+
+  void MethodWithCheck(int i) {
+    ABSL_TEST_CHECK_EQ(*this, i);
+    ABSL_TEST_CHECK_EQ(i, *this);
+  }
+
+  int Get() const { return v_; }
+
+ private:
+  friend bool operator==(const ComparableType& lhs, const ComparableType& rhs) {
+    return lhs.v_ == rhs.v_;
+  }
+  friend bool operator!=(const ComparableType& lhs, const ComparableType& rhs) {
+    return lhs.v_ != rhs.v_;
+  }
+  friend bool operator<(const ComparableType& lhs, const ComparableType& rhs) {
+    return lhs.v_ < rhs.v_;
+  }
+  friend bool operator<=(const ComparableType& lhs, const ComparableType& rhs) {
+    return lhs.v_ <= rhs.v_;
+  }
+  friend bool operator>(const ComparableType& lhs, const ComparableType& rhs) {
+    return lhs.v_ > rhs.v_;
+  }
+  friend bool operator>=(const ComparableType& lhs, const ComparableType& rhs) {
+    return lhs.v_ >= rhs.v_;
+  }
+  friend bool operator==(const ComparableType& lhs, int rhs) {
+    return lhs.v_ == rhs;
+  }
+  friend bool operator==(int lhs, const ComparableType& rhs) {
+    return lhs == rhs.v_;
+  }
+
+  friend std::ostream& operator<<(std::ostream& out, const ComparableType& v) {
+    return out << "ComparableType{" << v.Get() << "}";
+  }
+
+  int v_;
+};
+
+TEST(CHECKTest, TestUserDefinedCompOp) {
+  ABSL_TEST_CHECK_EQ(ComparableType{0}, ComparableType{0});
+  ABSL_TEST_CHECK_NE(ComparableType{1}, ComparableType{2});
+  ABSL_TEST_CHECK_LT(ComparableType{1}, ComparableType{2});
+  ABSL_TEST_CHECK_LE(ComparableType{1}, ComparableType{2});
+  ABSL_TEST_CHECK_GT(ComparableType{2}, ComparableType{1});
+  ABSL_TEST_CHECK_GE(ComparableType{2}, ComparableType{2});
+}
+
+TEST(CHECKTest, TestCheckInMethod) {
+  ComparableType v{1};
+  v.MethodWithCheck(1);
+}
+
+TEST(CHECKDeathTest, TestUserDefinedStreaming) {
+  ComparableType v1{1};
+  ComparableType v2{2};
+
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_EQ(v1, v2),
+      HasSubstr(
+          "Check failed: v1 == v2 (ComparableType{1} vs. ComparableType{2})"));
+}
+
+}  // namespace absl_log_internal
+
+// NOLINTEND(misc-definitions-in-headers)
+
+#endif  // ABSL_LOG_CHECK_TEST_IMPL_H_
diff --git a/abseil-cpp/absl/log/die_if_null.cc b/abseil-cpp/absl/log/die_if_null.cc
new file mode 100644
index 0000000..19c6a28
--- /dev/null
+++ b/abseil-cpp/absl/log/die_if_null.cc
@@ -0,0 +1,32 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/die_if_null.h"
+
+#include "absl/base/config.h"
+#include "absl/log/log.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+void DieBecauseNull(const char* file, int line, const char* exprtext) {
+  LOG(FATAL).AtLocation(file, line)
+      << absl::StrCat("Check failed: '", exprtext, "' Must be non-null");
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
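The AtLocation() call above attributes the FATAL message to the caller's file and line rather than to die_if_null.cc itself. A minimal sketch of the same technique in user code, assuming the standard LOG macros; the WarnAt helper and WARN_DEPRECATED macro are hypothetical:

#include "absl/log/log.h"

// Reports the caller's source location instead of this helper's own.
void WarnAt(const char* file, int line, const char* what) {
  LOG(WARNING).AtLocation(file, line) << "Deprecated call: " << what;
}

#define WARN_DEPRECATED(what) WarnAt(__FILE__, __LINE__, (what))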
diff --git a/abseil-cpp/absl/log/die_if_null.h b/abseil-cpp/absl/log/die_if_null.h
new file mode 100644
index 0000000..127a9ac
--- /dev/null
+++ b/abseil-cpp/absl/log/die_if_null.h
@@ -0,0 +1,76 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/die_if_null.h
+// -----------------------------------------------------------------------------
+//
+// This header declares macro `ABSL_DIE_IF_NULL`.
+
+#ifndef ABSL_LOG_DIE_IF_NULL_H_
+#define ABSL_LOG_DIE_IF_NULL_H_
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+
+// ABSL_DIE_IF_NULL()
+//
+// `ABSL_DIE_IF_NULL` behaves as `CHECK_NE` against `nullptr` but *also*
+// "returns" its argument.  It is useful in initializers where statements (like
+// `CHECK_NE`) can't be used.  Outside initializers, prefer `CHECK` or
+// `CHECK_NE`. `ABSL_DIE_IF_NULL` works for both raw pointers and (compatible)
+// smart pointers including `std::unique_ptr` and `std::shared_ptr`; more
+// generally, it works for any type that can be compared to nullptr_t.  For
+// types that aren't raw pointers, `ABSL_DIE_IF_NULL` returns a reference to
+// its argument, preserving the value category. Example:
+//
+//   Foo() : bar_(ABSL_DIE_IF_NULL(MethodReturningUniquePtr())) {}
+//
+// Use `CHECK(ptr)` or `CHECK(ptr != nullptr)` if the returned pointer is
+// unused.
+#define ABSL_DIE_IF_NULL(val) \
+  ::absl::log_internal::DieIfNull(__FILE__, __LINE__, #val, (val))
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// Crashes the process after logging `exprtext` annotated at the `file` and
+// `line` location. Called when `ABSL_DIE_IF_NULL` fails. Calling this function
+// generates less code than its implementation would if inlined, for a slight
+// code size reduction each time `ABSL_DIE_IF_NULL` is called.
+ABSL_ATTRIBUTE_NORETURN ABSL_ATTRIBUTE_NOINLINE void DieBecauseNull(
+    const char* file, int line, const char* exprtext);
+
+// Helper for `ABSL_DIE_IF_NULL`.
+template <typename T>
+ABSL_MUST_USE_RESULT T DieIfNull(const char* file, int line,
+                                 const char* exprtext, T&& t) {
+  if (ABSL_PREDICT_FALSE(t == nullptr)) {
+    // Call a non-inline helper function for a small code size improvement.
+    DieBecauseNull(file, line, exprtext);
+  }
+  return std::forward<T>(t);
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_DIE_IF_NULL_H_
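A minimal sketch of ABSL_DIE_IF_NULL in a member initializer list, where a CHECK statement cannot appear; illustrative only and not part of the patch (Widget and MakeCounter are made up):

#include <memory>

#include "absl/log/die_if_null.h"

std::unique_ptr<int> MakeCounter() { return std::make_unique<int>(0); }

class Widget {
 public:
  // Dies with a "Must be non-null" message if the factory returns null.
  Widget() : counter_(ABSL_DIE_IF_NULL(MakeCounter())) {}

 private:
  std::unique_ptr<int> counter_;
};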
diff --git a/abseil-cpp/absl/log/die_if_null_test.cc b/abseil-cpp/absl/log/die_if_null_test.cc
new file mode 100644
index 0000000..b0aab78
--- /dev/null
+++ b/abseil-cpp/absl/log/die_if_null_test.cc
@@ -0,0 +1,107 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/die_if_null.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/log/internal/test_helpers.h"
+
+namespace {
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+// TODO(b/69907837): Revisit these tests with the goal of making them less
+// convoluted.
+TEST(AbslDieIfNull, Simple) {
+  int64_t t;
+  void* ptr = static_cast<void*>(&t);
+  void* ref = ABSL_DIE_IF_NULL(ptr);
+  ASSERT_EQ(ptr, ref);
+
+  char* t_as_char;
+  t_as_char = ABSL_DIE_IF_NULL(reinterpret_cast<char*>(&t));
+  (void)t_as_char;
+
+  unsigned char* t_as_uchar;
+  t_as_uchar = ABSL_DIE_IF_NULL(reinterpret_cast<unsigned char*>(&t));
+  (void)t_as_uchar;
+
+  int* t_as_int;
+  t_as_int = ABSL_DIE_IF_NULL(reinterpret_cast<int*>(&t));
+  (void)t_as_int;
+
+  int64_t* t_as_int64_t;
+  t_as_int64_t = ABSL_DIE_IF_NULL(reinterpret_cast<int64_t*>(&t));
+  (void)t_as_int64_t;
+
+  std::unique_ptr<int64_t> sptr(new int64_t);
+  EXPECT_EQ(sptr.get(), ABSL_DIE_IF_NULL(sptr).get());
+  ABSL_DIE_IF_NULL(sptr).reset();
+
+  int64_t* int_ptr = new int64_t();
+  EXPECT_EQ(int_ptr, ABSL_DIE_IF_NULL(std::unique_ptr<int64_t>(int_ptr)).get());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(DeathCheckAbslDieIfNull, Simple) {
+  void* ptr;
+  ASSERT_DEATH({ ptr = ABSL_DIE_IF_NULL(nullptr); }, "");
+  (void)ptr;
+
+  std::unique_ptr<int64_t> sptr;
+  ASSERT_DEATH(ptr = ABSL_DIE_IF_NULL(sptr).get(), "");
+}
+#endif
+
+// Ensures that ABSL_DIE_IF_NULL works with C++11's std::unique_ptr and
+// std::shared_ptr.
+TEST(AbslDieIfNull, DoesNotCompareSmartPointerToNULL) {
+  std::unique_ptr<int> up(new int);
+  EXPECT_EQ(&up, &ABSL_DIE_IF_NULL(up));
+  ABSL_DIE_IF_NULL(up).reset();
+
+  std::shared_ptr<int> sp(new int);
+  EXPECT_EQ(&sp, &ABSL_DIE_IF_NULL(sp));
+  ABSL_DIE_IF_NULL(sp).reset();
+}
+
+// Verifies that ABSL_DIE_IF_NULL returns an rvalue reference if its argument is
+// an rvalue reference.
+TEST(AbslDieIfNull, PreservesRValues) {
+  int64_t* ptr = new int64_t();
+  auto uptr = ABSL_DIE_IF_NULL(std::unique_ptr<int64_t>(ptr));
+  EXPECT_EQ(ptr, uptr.get());
+}
+
+// Verifies that ABSL_DIE_IF_NULL returns an lvalue if its argument is an
+// lvalue.
+TEST(AbslDieIfNull, PreservesLValues) {
+  int64_t array[2] = {0};
+  int64_t* a = array + 0;
+  int64_t* b = array + 1;
+  using std::swap;
+  swap(ABSL_DIE_IF_NULL(a), ABSL_DIE_IF_NULL(b));
+  EXPECT_EQ(array + 1, a);
+  EXPECT_EQ(array + 0, b);
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/flags.cc b/abseil-cpp/absl/log/flags.cc
new file mode 100644
index 0000000..215b7bd
--- /dev/null
+++ b/abseil-cpp/absl/log/flags.cc
@@ -0,0 +1,120 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/flags.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/flags/flag.h"
+#include "absl/flags/marshalling.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/config.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+namespace {
+
+void SyncLoggingFlags() {
+  absl::SetFlag(&FLAGS_minloglevel, static_cast<int>(absl::MinLogLevel()));
+  absl::SetFlag(&FLAGS_log_prefix, absl::ShouldPrependLogPrefix());
+}
+
+bool RegisterSyncLoggingFlags() {
+  log_internal::SetLoggingGlobalsListener(&SyncLoggingFlags);
+  return true;
+}
+
+ABSL_ATTRIBUTE_UNUSED const bool unused = RegisterSyncLoggingFlags();
+
+template <typename T>
+T GetFromEnv(const char* varname, T dflt) {
+  const char* val = ::getenv(varname);
+  if (val != nullptr) {
+    std::string err;
+    ABSL_INTERNAL_CHECK(absl::ParseFlag(val, &dflt, &err), err.c_str());
+  }
+  return dflt;
+}
+
+constexpr absl::LogSeverityAtLeast StderrThresholdDefault() {
+  return absl::LogSeverityAtLeast::kError;
+}
+
+}  // namespace
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+ABSL_FLAG(int, stderrthreshold,
+          static_cast<int>(absl::log_internal::StderrThresholdDefault()),
+          "Log messages at or above this threshold level are copied to stderr.")
+    .OnUpdate([] {
+      absl::log_internal::RawSetStderrThreshold(
+          static_cast<absl::LogSeverityAtLeast>(
+              absl::GetFlag(FLAGS_stderrthreshold)));
+    });
+
+ABSL_FLAG(int, minloglevel, static_cast<int>(absl::LogSeverityAtLeast::kInfo),
+          "Messages logged at a lower level than this don't actually "
+          "get logged anywhere")
+    .OnUpdate([] {
+      absl::log_internal::RawSetMinLogLevel(
+          static_cast<absl::LogSeverityAtLeast>(
+              absl::GetFlag(FLAGS_minloglevel)));
+    });
+
+ABSL_FLAG(std::string, log_backtrace_at, "",
+          "Emit a backtrace when logging at file:linenum.")
+    .OnUpdate([] {
+      const std::string log_backtrace_at =
+          absl::GetFlag(FLAGS_log_backtrace_at);
+      if (log_backtrace_at.empty()) {
+        absl::ClearLogBacktraceLocation();
+        return;
+      }
+
+      const size_t last_colon = log_backtrace_at.rfind(':');
+      if (last_colon == log_backtrace_at.npos) {
+        absl::ClearLogBacktraceLocation();
+        return;
+      }
+
+      const absl::string_view file =
+          absl::string_view(log_backtrace_at).substr(0, last_colon);
+      int line;
+      if (!absl::SimpleAtoi(
+              absl::string_view(log_backtrace_at).substr(last_colon + 1),
+              &line)) {
+        absl::ClearLogBacktraceLocation();
+        return;
+      }
+      absl::SetLogBacktraceLocation(file, line);
+    });
+
+ABSL_FLAG(bool, log_prefix, true,
+          "Prepend the log prefix to the start of each log line")
+    .OnUpdate([] {
+      absl::log_internal::RawEnableLogPrefix(absl::GetFlag(FLAGS_log_prefix));
+    });
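RegisterSyncLoggingFlags above relies on the common static-initializer registration idiom: a namespace-scope const bool forces the registration call to run before main(). A self-contained sketch of that idiom follows; the listener type and functions are hypothetical stand-ins, not the absl API:

#include <cstdio>

namespace {

using Listener = void (*)();
Listener g_listener = nullptr;

void OnGlobalsChanged() { std::puts("globals changed"); }

bool RegisterListener() {
  g_listener = &OnGlobalsChanged;
  return true;
}

// Evaluated during static initialization, before main() runs.
const bool kRegistered = RegisterListener();

}  // namespace

int main() {
  if (g_listener != nullptr) g_listener();  // prints "globals changed"
  return 0;
}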
diff --git a/abseil-cpp/absl/log/flags.h b/abseil-cpp/absl/log/flags.h
new file mode 100644
index 0000000..146cfdd
--- /dev/null
+++ b/abseil-cpp/absl/log/flags.h
@@ -0,0 +1,43 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/flags.h
+// -----------------------------------------------------------------------------
+//
+
+#ifndef ABSL_LOG_FLAGS_H_
+#define ABSL_LOG_FLAGS_H_
+
+// The Abseil Logging library supports the following command line flags to
+// configure logging behavior at runtime:
+//
+// --stderrthreshold=<value>
+// Log messages at or above this threshold level are copied to stderr.
+//
+// --minloglevel=<value>
+// Messages logged at a lower level than this are discarded and don't actually
+// get logged anywhere.
+//
+// --log_backtrace_at=<file:linenum>
+// Emit a backtrace (stack trace) when logging at file:linenum.
+//
+// To use these commandline flags, the //absl/log:flags library must be
+// explicitly linked, and absl::ParseCommandLine() must be called before the
+// call to absl::InitializeLog().
+//
+// To configure the Log library programmatically, use the interfaces defined in
+// absl/log/globals.h.
+
+#endif  // ABSL_LOG_FLAGS_H_
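A typical main() wiring these flags up, per the comment above that absl::ParseCommandLine() must run before absl::InitializeLog(); illustrative only and not part of the patch (the binary must also link //absl/log:flags):

#include "absl/flags/parse.h"
#include "absl/log/initialize.h"
#include "absl/log/log.h"

// Run as, e.g.:  ./my_tool --stderrthreshold=0 --minloglevel=1
int main(int argc, char* argv[]) {
  absl::ParseCommandLine(argc, argv);  // consumes --stderrthreshold etc.
  absl::InitializeLog();
  LOG(INFO) << "logging configured from command-line flags";
  return 0;
}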
diff --git a/abseil-cpp/absl/log/flags_test.cc b/abseil-cpp/absl/log/flags_test.cc
new file mode 100644
index 0000000..1080ea1
--- /dev/null
+++ b/abseil-cpp/absl/log/flags_test.cc
@@ -0,0 +1,188 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/flags.h"
+
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/flags/flag.h"
+#include "absl/flags/reflection.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+using ::absl::log_internal::TextMessage;
+
+using ::testing::HasSubstr;
+using ::testing::Not;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {
+  return absl::LogSeverityAtLeast::kError;
+}
+
+class LogFlagsTest : public ::testing::Test {
+ protected:
+  absl::FlagSaver flag_saver_;
+};
+
+// This test is disabled because it adds an order dependency to the test
+// suite.
+// This order dependency is currently not fixable due to the way the
+// stderrthreshold global value is out of sync with the stderrthreshold flag.
+TEST_F(LogFlagsTest, DISABLED_StderrKnobsDefault) {
+  EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
+}
+
+TEST_F(LogFlagsTest, SetStderrThreshold) {
+  absl::SetFlag(&FLAGS_stderrthreshold,
+                static_cast<int>(absl::LogSeverityAtLeast::kInfo));
+
+  EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kInfo);
+
+  absl::SetFlag(&FLAGS_stderrthreshold,
+                static_cast<int>(absl::LogSeverityAtLeast::kError));
+
+  EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
+}
+
+TEST_F(LogFlagsTest, SetMinLogLevel) {
+  absl::SetFlag(&FLAGS_minloglevel,
+                static_cast<int>(absl::LogSeverityAtLeast::kError));
+
+  EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
+
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(
+      absl::LogSeverityAtLeast::kWarning);
+
+  EXPECT_EQ(absl::GetFlag(FLAGS_minloglevel),
+            static_cast<int>(absl::LogSeverityAtLeast::kWarning));
+}
+
+TEST_F(LogFlagsTest, PrependLogPrefix) {
+  absl::SetFlag(&FLAGS_log_prefix, false);
+
+  EXPECT_EQ(absl::ShouldPrependLogPrefix(), false);
+
+  absl::EnableLogPrefix(true);
+
+  EXPECT_EQ(absl::GetFlag(FLAGS_log_prefix), true);
+}
+
+TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at, "");
+  LOG(INFO) << "hello world";
+}
+
+TEST_F(LogFlagsTest, BacktraceAtNonsense) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish");
+  LOG(INFO) << "hello world";
+}
+
+TEST_F(LogFlagsTest, BacktraceAtWrongFile) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { LOG(INFO) << "hello world"; };
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at,
+                absl::StrCat("some_other_file.cc:", log_line));
+  do_log();
+}
+
+TEST_F(LogFlagsTest, BacktraceAtWrongLine) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { LOG(INFO) << "hello world"; };
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at,
+                absl::StrCat("flags_test.cc:", log_line + 1));
+  do_log();
+}
+
+TEST_F(LogFlagsTest, BacktraceAtWholeFilename) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { LOG(INFO) << "hello world"; };
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at, absl::StrCat(__FILE__, ":", log_line));
+  do_log();
+}
+
+TEST_F(LogFlagsTest, BacktraceAtNonmatchingSuffix) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { LOG(INFO) << "hello world"; };
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at,
+                absl::StrCat("flags_test.cc:", log_line, "gibberish"));
+  do_log();
+}
+
+TEST_F(LogFlagsTest, LogsBacktrace) {
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { LOG(INFO) << "hello world"; };
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  testing::InSequence seq;
+  EXPECT_CALL(test_sink, Send(TextMessage(HasSubstr("(stacktrace:"))));
+  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
+
+  test_sink.StartCapturingLogs();
+  absl::SetFlag(&FLAGS_log_backtrace_at,
+                absl::StrCat("flags_test.cc:", log_line));
+  do_log();
+  absl::SetFlag(&FLAGS_log_backtrace_at, "");
+  do_log();
+}
+
+}  // namespace
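The tests above drive configuration through the flags; the same knobs are also available programmatically via absl/log/globals.h, as sketched below (illustrative only, not part of the patch; the file name and line number are made up):

#include "absl/base/log_severity.h"
#include "absl/log/globals.h"
#include "absl/log/log.h"

void ConfigureLogging() {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  absl::SetStderrThreshold(absl::LogSeverityAtLeast::kWarning);
  absl::EnableLogPrefix(true);
  // Emit a stack trace whenever this exact file:line logs.
  absl::SetLogBacktraceLocation("my_tool.cc", 42);
  LOG(INFO) << "programmatic logging configuration applied";
}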
diff --git a/abseil-cpp/absl/log/globals.cc b/abseil-cpp/absl/log/globals.cc
new file mode 100644
index 0000000..cc85438
--- /dev/null
+++ b/abseil-cpp/absl/log/globals.cc
@@ -0,0 +1,178 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/globals.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/log_severity.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+// These atomics represent logging library configuration.
+// Integer types are used instead of absl::LogSeverity to ensure that a
+// lock-free std::atomic is used when possible.
+ABSL_CONST_INIT std::atomic<int> min_log_level{
+    static_cast<int>(absl::LogSeverityAtLeast::kInfo)};
+ABSL_CONST_INIT std::atomic<int> stderrthreshold{
+    static_cast<int>(absl::LogSeverityAtLeast::kError)};
+// We store a hash of the configured backtrace site and compare hashes so that
+// checking it does not require holding a mutex or copying the value of a
+// string-typed flag in a very hot codepath.
+ABSL_CONST_INIT std::atomic<size_t> log_backtrace_at_hash{0};
+ABSL_CONST_INIT std::atomic<bool> prepend_log_prefix{true};
+
+constexpr char kDefaultAndroidTag[] = "native";
+ABSL_CONST_INIT std::atomic<const char*> android_log_tag{kDefaultAndroidTag};
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<log_internal::LoggingGlobalsListener>
+    logging_globals_listener;
+
+size_t HashSiteForLogBacktraceAt(absl::string_view file, int line) {
+  return absl::HashOf(file, line);
+}
+
+void TriggerLoggingGlobalsListener() {
+  auto* listener = logging_globals_listener.Load();
+  if (listener != nullptr) listener();
+}
+
+}  // namespace
+
+namespace log_internal {
+
+void RawSetMinLogLevel(absl::LogSeverityAtLeast severity) {
+  min_log_level.store(static_cast<int>(severity), std::memory_order_release);
+}
+
+void RawSetStderrThreshold(absl::LogSeverityAtLeast severity) {
+  stderrthreshold.store(static_cast<int>(severity), std::memory_order_release);
+}
+
+void RawEnableLogPrefix(bool on_off) {
+  prepend_log_prefix.store(on_off, std::memory_order_release);
+}
+
+void SetLoggingGlobalsListener(LoggingGlobalsListener l) {
+  logging_globals_listener.Store(l);
+}
+
+}  // namespace log_internal
+
+absl::LogSeverityAtLeast MinLogLevel() {
+  return static_cast<absl::LogSeverityAtLeast>(
+      min_log_level.load(std::memory_order_acquire));
+}
+
+void SetMinLogLevel(absl::LogSeverityAtLeast severity) {
+  log_internal::RawSetMinLogLevel(severity);
+  TriggerLoggingGlobalsListener();
+}
+
+namespace log_internal {
+
+ScopedMinLogLevel::ScopedMinLogLevel(absl::LogSeverityAtLeast severity)
+    : saved_severity_(absl::MinLogLevel()) {
+  absl::SetMinLogLevel(severity);
+}
+ScopedMinLogLevel::~ScopedMinLogLevel() {
+  absl::SetMinLogLevel(saved_severity_);
+}
+
+}  // namespace log_internal
+
+absl::LogSeverityAtLeast StderrThreshold() {
+  return static_cast<absl::LogSeverityAtLeast>(
+      stderrthreshold.load(std::memory_order_acquire));
+}
+
+void SetStderrThreshold(absl::LogSeverityAtLeast severity) {
+  log_internal::RawSetStderrThreshold(severity);
+  TriggerLoggingGlobalsListener();
+}
+
+ScopedStderrThreshold::ScopedStderrThreshold(absl::LogSeverityAtLeast severity)
+    : saved_severity_(absl::StderrThreshold()) {
+  absl::SetStderrThreshold(severity);
+}
+
+ScopedStderrThreshold::~ScopedStderrThreshold() {
+  absl::SetStderrThreshold(saved_severity_);
+}
+
+namespace log_internal {
+
+const char* GetAndroidNativeTag() {
+  return android_log_tag.load(std::memory_order_acquire);
+}
+
+}  // namespace log_internal
+
+void SetAndroidNativeTag(const char* tag) {
+  ABSL_CONST_INIT static std::atomic<const std::string*> user_log_tag(nullptr);
+  ABSL_INTERNAL_CHECK(tag, "tag must be non-null.");
+
+  const std::string* tag_str = new std::string(tag);
+  ABSL_INTERNAL_CHECK(
+      android_log_tag.exchange(tag_str->c_str(), std::memory_order_acq_rel) ==
+          kDefaultAndroidTag,
+      "SetAndroidNativeTag() must only be called once per process!");
+  user_log_tag.store(tag_str, std::memory_order_relaxed);
+}
+
+namespace log_internal {
+
+bool ShouldLogBacktraceAt(absl::string_view file, int line) {
+  const size_t flag_hash =
+      log_backtrace_at_hash.load(std::memory_order_relaxed);
+
+  return flag_hash != 0 && flag_hash == HashSiteForLogBacktraceAt(file, line);
+}
+
+}  // namespace log_internal
+
+void SetLogBacktraceLocation(absl::string_view file, int line) {
+  log_backtrace_at_hash.store(HashSiteForLogBacktraceAt(file, line),
+                              std::memory_order_relaxed);
+}
+
+void ClearLogBacktraceLocation() {
+  log_backtrace_at_hash.store(0, std::memory_order_relaxed);
+}
+
+bool ShouldPrependLogPrefix() {
+  return prepend_log_prefix.load(std::memory_order_acquire);
+}
+
+void EnableLogPrefix(bool on_off) {
+  log_internal::RawEnableLogPrefix(on_off);
+  TriggerLoggingGlobalsListener();
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
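
The backtrace-at machinery above deliberately avoids locking on the fast path: only `absl::HashOf(file, line)` is stored, and each `LOG` site just compares hashes. A minimal standalone sketch of that scheme (the names here are illustrative, not part of the library):

    #include <atomic>
    #include <cstddef>

    #include "absl/hash/hash.h"
    #include "absl/strings/string_view.h"

    std::atomic<size_t> backtrace_site_hash{0};

    void SetBacktraceSite(absl::string_view file, int line) {
      backtrace_site_hash.store(absl::HashOf(file, line),
                                std::memory_order_relaxed);
    }

    bool IsBacktraceSite(absl::string_view file, int line) {
      const size_t h = backtrace_site_hash.load(std::memory_order_relaxed);
      // A zero hash means "no site configured"; otherwise compare hashes
      // instead of strings so no mutex or string copy is needed.
      return h != 0 && h == absl::HashOf(file, line);
    }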
diff --git a/abseil-cpp/absl/log/globals.h b/abseil-cpp/absl/log/globals.h
new file mode 100644
index 0000000..bc3864c
--- /dev/null
+++ b/abseil-cpp/absl/log/globals.h
@@ -0,0 +1,195 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/globals.h
+// -----------------------------------------------------------------------------
+//
+// This header declares global logging library configuration knobs.
+
+#ifndef ABSL_LOG_GLOBALS_H_
+#define ABSL_LOG_GLOBALS_H_
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+//------------------------------------------------------------------------------
+//  Minimum Log Level
+//------------------------------------------------------------------------------
+//
+// Messages logged at or above this severity are directed to all registered log
+// sinks or skipped otherwise. This parameter can also be modified using the
+// command-line flag --minloglevel.
+// See absl/base/log_severity.h for descriptions of severity levels.
+
+// MinLogLevel()
+//
+// Returns the value of the Minimum Log Level parameter.
+// This function is async-signal-safe.
+ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast MinLogLevel();
+
+// SetMinLogLevel()
+//
+// Updates the value of the Minimum Log Level parameter.
+// This function is async-signal-safe.
+void SetMinLogLevel(absl::LogSeverityAtLeast severity);
+
+namespace log_internal {
+
+// ScopedMinLogLevel
+//
+// RAII type used to temporarily update the Min Log Level parameter.
+class ScopedMinLogLevel final {
+ public:
+  explicit ScopedMinLogLevel(absl::LogSeverityAtLeast severity);
+  ScopedMinLogLevel(const ScopedMinLogLevel&) = delete;
+  ScopedMinLogLevel& operator=(const ScopedMinLogLevel&) = delete;
+  ~ScopedMinLogLevel();
+
+ private:
+  absl::LogSeverityAtLeast saved_severity_;
+};
+
+}  // namespace log_internal
+
+//------------------------------------------------------------------------------
+// Stderr Threshold
+//------------------------------------------------------------------------------
+//
+// Messages logged at or above this level are directed to stderr in
+// addition to other registered log sinks. This parameter can also be modified
+// using the command-line flag --stderrthreshold.
+// See absl/base/log_severity.h for descriptions of severity levels.
+
+// StderrThreshold()
+//
+// Returns the value of the Stderr Threshold parameter.
+// This function is async-signal-safe.
+ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast StderrThreshold();
+
+// SetStderrThreshold()
+//
+// Updates the Stderr Threshold parameter.
+// This function is async-signal-safe.
+void SetStderrThreshold(absl::LogSeverityAtLeast severity);
+inline void SetStderrThreshold(absl::LogSeverity severity) {
+  absl::SetStderrThreshold(static_cast<absl::LogSeverityAtLeast>(severity));
+}
+
+// ScopedStderrThreshold
+//
+// RAII type used to temporarily update the Stderr Threshold parameter.
+class ScopedStderrThreshold final {
+ public:
+  explicit ScopedStderrThreshold(absl::LogSeverityAtLeast severity);
+  ScopedStderrThreshold(const ScopedStderrThreshold&) = delete;
+  ScopedStderrThreshold& operator=(const ScopedStderrThreshold&) = delete;
+  ~ScopedStderrThreshold();
+
+ private:
+  absl::LogSeverityAtLeast saved_severity_;
+};
+
+//------------------------------------------------------------------------------
+// Log Backtrace At
+//------------------------------------------------------------------------------
+//
+// Users can request an existing `LOG` statement, specified by file and line
+// number, to also include a backtrace when logged.
+
+// ShouldLogBacktraceAt()
+//
+// Returns true if we should log a backtrace at the specified location.
+namespace log_internal {
+ABSL_MUST_USE_RESULT bool ShouldLogBacktraceAt(absl::string_view file,
+                                               int line);
+}  // namespace log_internal
+
+// SetLogBacktraceLocation()
+//
+// Sets the location the backtrace should be logged at.  If the specified
+// location isn't a `LOG` statement, the effect will be the same as
+// `ClearLogBacktraceLocation` (but less efficient).
+void SetLogBacktraceLocation(absl::string_view file, int line);
+
+// ClearLogBacktraceLocation()
+//
+// Clears the set location so that backtraces will no longer be logged at it.
+void ClearLogBacktraceLocation();
+
+//------------------------------------------------------------------------------
+// Prepend Log Prefix
+//------------------------------------------------------------------------------
+//
+// This option tells the logging library that every logged message
+// should include the prefix (severity, date, time, PID, etc.).
+
+// ShouldPrependLogPrefix()
+//
+// Returns the value of the Prepend Log Prefix option.
+// This function is async-signal-safe.
+ABSL_MUST_USE_RESULT bool ShouldPrependLogPrefix();
+
+// EnableLogPrefix()
+//
+// Updates the value of the Prepend Log Prefix option.
+// This function is async-signal-safe.
+void EnableLogPrefix(bool on_off);
+
+//------------------------------------------------------------------------------
+// Configure Android Native Log Tag
+//------------------------------------------------------------------------------
+//
+// The logging library forwards to the Android system log API when built for
+// Android.  That API takes a string "tag" value in addition to a message and
+// severity level.  The tag is used to identify the source of messages and to
+// filter them.  This library uses the tag "native" by default.
+
+// SetAndroidNativeTag()
+//
+// Stores a copy of the string pointed to by `tag` and uses it as the Android
+// logging tag thereafter. `tag` must not be null.
+// This function must not be called more than once!
+void SetAndroidNativeTag(const char* tag);
+
+namespace log_internal {
+// GetAndroidNativeTag()
+//
+// Returns the configured Android logging tag.
+const char* GetAndroidNativeTag();
+}  // namespace log_internal
+
+namespace log_internal {
+
+using LoggingGlobalsListener = void (*)();
+void SetLoggingGlobalsListener(LoggingGlobalsListener l);
+
+// Internal implementations of the setter routines. These are used to break
+// circular dependencies between flags and globals. Each "Raw" routine
+// corresponds to a non-"Raw" counterpart and sets the configuration
+// parameter directly without calling back to the listener.
+void RawSetMinLogLevel(absl::LogSeverityAtLeast severity);
+void RawSetStderrThreshold(absl::LogSeverityAtLeast severity);
+void RawEnableLogPrefix(bool on_off);
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_GLOBALS_H_
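
As a usage sketch of the knobs declared in this header (the function below is hypothetical, but every call it makes is declared above):

    #include "absl/base/log_severity.h"
    #include "absl/log/globals.h"

    void ConfigureLoggingForBatchJob() {
      // Drop everything below WARNING, and only mirror errors to stderr.
      absl::SetMinLogLevel(absl::LogSeverityAtLeast::kWarning);
      absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError);

      // Request a backtrace the next time some_file.cc:123 logs, and keep the
      // standard prefix (severity, timestamp, ...) on each line.
      absl::SetLogBacktraceLocation("some_file.cc", 123);
      absl::EnableLogPrefix(true);
    }

For a temporary change, `absl::ScopedStderrThreshold` (and, internally, `log_internal::ScopedMinLogLevel`) restores the previous value when it goes out of scope.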
diff --git a/abseil-cpp/absl/log/globals_test.cc b/abseil-cpp/absl/log/globals_test.cc
new file mode 100644
index 0000000..f7af47c
--- /dev/null
+++ b/abseil-cpp/absl/log/globals_test.cc
@@ -0,0 +1,104 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/globals.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/globals.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+
+namespace {
+using ::testing::_;
+using ::testing::StrEq;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+constexpr static absl::LogSeverityAtLeast DefaultMinLogLevel() {
+  return absl::LogSeverityAtLeast::kInfo;
+}
+constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {
+  return absl::LogSeverityAtLeast::kError;
+}
+
+TEST(TestGlobals, MinLogLevel) {
+  EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError);
+  EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
+  absl::SetMinLogLevel(DefaultMinLogLevel());
+}
+
+TEST(TestGlobals, ScopedMinLogLevel) {
+  EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
+  {
+    absl::log_internal::ScopedMinLogLevel scoped_stderr_threshold(
+        absl::LogSeverityAtLeast::kError);
+    EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
+  }
+  EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
+}
+
+TEST(TestGlobals, StderrThreshold) {
+  EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
+  absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError);
+  EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
+  absl::SetStderrThreshold(DefaultStderrThreshold());
+}
+
+TEST(TestGlobals, ScopedStderrThreshold) {
+  EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
+  {
+    absl::ScopedStderrThreshold scoped_stderr_threshold(
+        absl::LogSeverityAtLeast::kError);
+    EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
+  }
+  EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
+}
+
+TEST(TestGlobals, LogBacktraceAt) {
+  EXPECT_FALSE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111));
+  absl::SetLogBacktraceLocation("some_file.cc", 111);
+  EXPECT_TRUE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111));
+  EXPECT_FALSE(
+      absl::log_internal::ShouldLogBacktraceAt("another_file.cc", 222));
+}
+
+TEST(TestGlobals, LogPrefix) {
+  EXPECT_TRUE(absl::ShouldPrependLogPrefix());
+  absl::EnableLogPrefix(false);
+  EXPECT_FALSE(absl::ShouldPrependLogPrefix());
+  absl::EnableLogPrefix(true);
+  EXPECT_TRUE(absl::ShouldPrependLogPrefix());
+}
+
+TEST(TestGlobals, AndroidLogTag) {
+  // Verify invalid tags result in a check failure.
+  EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(nullptr), ".*");
+
+  // Verify valid tags applied.
+  EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("native"));
+  absl::SetAndroidNativeTag("test_tag");
+  EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("test_tag"));
+
+  // Verify that additional calls (more than 1) result in a check failure.
+  EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag("test_tag_fail"), ".*");
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/initialize.cc b/abseil-cpp/absl/log/initialize.cc
new file mode 100644
index 0000000..a3f6d6c
--- /dev/null
+++ b/abseil-cpp/absl/log/initialize.cc
@@ -0,0 +1,34 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/initialize.h"
+
+#include "absl/base/config.h"
+#include "absl/log/internal/globals.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+void InitializeLog() {
+  // This comes first since it is used by RAW_LOG.
+  absl::log_internal::SetTimeZone(absl::LocalTimeZone());
+
+  // Note that initialization is complete, so logs can now be sent to their
+  // proper destinations rather than stderr.
+  log_internal::SetInitialized();
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/initialize.h b/abseil-cpp/absl/log/initialize.h
new file mode 100644
index 0000000..f600eb6
--- /dev/null
+++ b/abseil-cpp/absl/log/initialize.h
@@ -0,0 +1,45 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/initialize.h
+// -----------------------------------------------------------------------------
+//
+// This header declares the Abseil Log initialization routine InitializeLog().
+
+#ifndef ABSL_LOG_INITIALIZE_H_
+#define ABSL_LOG_INITIALIZE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// InitializeLog()
+//
+// Initializes the Abseil logging library.
+//
+// Before this function is called, all log messages are directed only to stderr.
+// After initialization is finished, log messages are directed to all registered
+// `LogSink`s.
+//
+// It is an error to call this function twice.
+//
+// There is no corresponding function to shut down the logging library.
+void InitializeLog();
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INITIALIZE_H_
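
A minimal sketch of the intended call sequence (the `main` below is hypothetical; `LOG` comes from absl/log/log.h, as included by the tests above):

    #include "absl/log/initialize.h"
    #include "absl/log/log.h"

    int main() {
      // Until this call, messages go only to stderr; afterwards they are
      // delivered to all registered LogSinks.
      absl::InitializeLog();
      LOG(INFO) << "logging initialized";
      return 0;
    }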
diff --git a/abseil-cpp/absl/log/internal/BUILD.bazel b/abseil-cpp/absl/log/internal/BUILD.bazel
new file mode 100644
index 0000000..555c5e5
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/BUILD.bazel
@@ -0,0 +1,383 @@
+#
+# Copyright 2022 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = [
+    "//absl/log:__pkg__",
+])
+
+licenses(["notice"])
+
+cc_library(
+    name = "check_impl",
+    hdrs = ["check_impl.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":check_op",
+        ":conditions",
+        ":log_message",
+        ":strip",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_library(
+    name = "check_op",
+    srcs = ["check_op.cc"],
+    hdrs = ["check_op.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/log:__pkg__",
+    ],
+    deps = [
+        ":nullguard",
+        ":nullstream",
+        ":strip",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/strings",
+    ],
+)
+
+cc_library(
+    name = "conditions",
+    srcs = ["conditions.cc"],
+    hdrs = ["conditions.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":voidify",
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_library(
+    name = "config",
+    hdrs = ["config.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/log:__pkg__",
+    ],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_library(
+    name = "flags",
+    hdrs = ["flags.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/flags:flag",
+    ],
+)
+
+cc_library(
+    name = "format",
+    srcs = ["log_format.cc"],
+    hdrs = ["log_format.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":append_truncated",
+        ":config",
+        ":globals",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/strings",
+        "//absl/strings:str_format",
+        "//absl/time",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "globals",
+    srcs = ["globals.cc"],
+    hdrs = ["globals.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/log:__pkg__",
+    ],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/base:raw_logging_internal",
+        "//absl/strings",
+        "//absl/time",
+    ],
+)
+
+cc_library(
+    name = "log_impl",
+    hdrs = ["log_impl.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":conditions",
+        ":log_message",
+        ":strip",
+    ],
+)
+
+cc_library(
+    name = "log_message",
+    srcs = ["log_message.cc"],
+    hdrs = ["log_message.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/log:__pkg__",
+    ],
+    deps = [
+        ":append_truncated",
+        ":format",
+        ":globals",
+        ":log_sink_set",
+        ":nullguard",
+        ":proto",
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:errno_saver",
+        "//absl/base:log_severity",
+        "//absl/base:raw_logging_internal",
+        "//absl/base:strerror",
+        "//absl/container:inlined_vector",
+        "//absl/debugging:examine_stack",
+        "//absl/log:globals",
+        "//absl/log:log_entry",
+        "//absl/log:log_sink",
+        "//absl/log:log_sink_registry",
+        "//absl/memory",
+        "//absl/strings",
+        "//absl/time",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "append_truncated",
+    hdrs = ["append_truncated.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/strings",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "log_sink_set",
+    srcs = ["log_sink_set.cc"],
+    hdrs = ["log_sink_set.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS + select({
+        "//conditions:default": [],
+        "@platforms//os:android": ["-llog"],
+    }),
+    deps = [
+        ":config",
+        ":globals",
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/base:raw_logging_internal",
+        "//absl/cleanup",
+        "//absl/log:globals",
+        "//absl/log:log_entry",
+        "//absl/log:log_sink",
+        "//absl/strings",
+        "//absl/synchronization",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "nullguard",
+    srcs = ["nullguard.cc"],
+    hdrs = ["nullguard.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_library(
+    name = "nullstream",
+    hdrs = ["nullstream.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/strings",
+    ],
+)
+
+cc_library(
+    name = "strip",
+    hdrs = ["strip.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_message",
+        ":nullstream",
+        "//absl/base:log_severity",
+    ],
+)
+
+cc_library(
+    name = "structured",
+    hdrs = ["structured.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_message",
+        "//absl/base:config",
+        "//absl/strings",
+    ],
+)
+
+cc_library(
+    name = "test_actions",
+    testonly = True,
+    srcs = ["test_actions.cc"],
+    hdrs = ["test_actions.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log:log_entry",
+        "//absl/strings",
+        "//absl/time",
+    ] + select({
+        "//absl:msvc_compiler": [],
+        "//conditions:default": [
+        ],
+    }),
+)
+
+cc_library(
+    name = "test_helpers",
+    testonly = True,
+    srcs = ["test_helpers.cc"],
+    hdrs = ["test_helpers.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":globals",
+        "//absl/base:config",
+        "//absl/base:log_severity",
+        "//absl/log:globals",
+        "//absl/log:initialize",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_library(
+    name = "test_matchers",
+    testonly = True,
+    srcs = ["test_matchers.cc"],
+    hdrs = ["test_matchers.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":test_helpers",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log:log_entry",
+        "//absl/strings",
+        "//absl/time",
+        "@com_google_googletest//:gtest",
+    ] + select({
+        "//absl:msvc_compiler": [],
+        "//conditions:default": [
+        ],
+    }),
+)
+
+cc_library(
+    name = "voidify",
+    hdrs = ["voidify.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = ["//absl/base:config"],
+)
+
+cc_library(
+    name = "proto",
+    srcs = ["proto.cc"],
+    hdrs = ["proto.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/strings",
+        "//absl/types:span",
+    ],
+)
+
+# Test targets
+cc_test(
+    name = "stderr_log_sink_test",
+    size = "small",
+    srcs = ["stderr_log_sink_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test:os:android",
+        "no_test:os:ios",
+        "no_test_android",
+        "no_test_darwin_x86_64",
+        "no_test_ios",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":test_helpers",
+        "//absl/base:core_headers",
+        "//absl/base:log_severity",
+        "//absl/log",
+        "//absl/log:globals",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
diff --git a/abseil-cpp/absl/log/internal/append_truncated.h b/abseil-cpp/absl/log/internal/append_truncated.h
new file mode 100644
index 0000000..f0e7912
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/append_truncated.h
@@ -0,0 +1,47 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_LOG_INTERNAL_APPEND_TRUNCATED_H_
+#define ABSL_LOG_INTERNAL_APPEND_TRUNCATED_H_
+
+#include <cstddef>
+#include <cstring>
+
+#include "absl/base/config.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+// Copies into `dst` as many bytes of `src` as will fit, advances `dst` past
+// the bytes written, and returns the number of bytes written.
+inline size_t AppendTruncated(absl::string_view src, absl::Span<char> &dst) {
+  if (src.size() > dst.size()) src = src.substr(0, dst.size());
+  memcpy(dst.data(), src.data(), src.size());
+  dst.remove_prefix(src.size());
+  return src.size();
+}
+// Likewise, but `n` copies of `c`.
+inline size_t AppendTruncated(char c, size_t n, absl::Span<char> &dst) {
+  if (n > dst.size()) n = dst.size();
+  memset(dst.data(), c, n);
+  dst.remove_prefix(n);
+  return n;
+}
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_APPEND_TRUNCATED_H_
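
A small illustration of how these helpers consume a fixed-size buffer (the function and buffer size are hypothetical; the two overloads are the ones defined above):

    #include <cstddef>

    #include "absl/log/internal/append_truncated.h"
    #include "absl/types/span.h"

    size_t FillGreeting(char (&buf)[16]) {
      absl::Span<char> remaining(buf, sizeof(buf));
      size_t written = 0;
      // Each call writes as much as fits and shrinks `remaining` from the
      // front, so later calls continue where earlier ones stopped.
      written += absl::log_internal::AppendTruncated("hello, world", remaining);
      written += absl::log_internal::AppendTruncated('!', 3, remaining);
      return written;  // 15 bytes here; anything past the buffer is dropped.
    }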
diff --git a/abseil-cpp/absl/log/internal/check_impl.h b/abseil-cpp/absl/log/internal/check_impl.h
new file mode 100644
index 0000000..00f25f8
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/check_impl.h
@@ -0,0 +1,150 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_LOG_INTERNAL_CHECK_IMPL_H_
+#define ABSL_LOG_INTERNAL_CHECK_IMPL_H_
+
+#include "absl/base/optimization.h"
+#include "absl/log/internal/check_op.h"
+#include "absl/log/internal/conditions.h"
+#include "absl/log/internal/log_message.h"
+#include "absl/log/internal/strip.h"
+
+// CHECK
+#define ABSL_LOG_INTERNAL_CHECK_IMPL(condition, condition_text)       \
+  ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS,                        \
+                                    ABSL_PREDICT_FALSE(!(condition))) \
+  ABSL_LOG_INTERNAL_CHECK(condition_text).InternalStream()
+
+#define ABSL_LOG_INTERNAL_QCHECK_IMPL(condition, condition_text)       \
+  ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS,                        \
+                                     ABSL_PREDICT_FALSE(!(condition))) \
+  ABSL_LOG_INTERNAL_QCHECK(condition_text).InternalStream()
+
+#define ABSL_LOG_INTERNAL_PCHECK_IMPL(condition, condition_text) \
+  ABSL_LOG_INTERNAL_CHECK_IMPL(condition, condition_text).WithPerror()
+
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DCHECK_IMPL(condition, condition_text) \
+  ABSL_LOG_INTERNAL_CHECK_IMPL(condition, condition_text)
+#else
+#define ABSL_LOG_INTERNAL_DCHECK_IMPL(condition, condition_text) \
+  ABSL_LOG_INTERNAL_CHECK_IMPL(true || (condition), "true")
+#endif
+
+// CHECK_EQ
+#define ABSL_LOG_INTERNAL_CHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_OP(Check_EQ, ==, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_CHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_OP(Check_NE, !=, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_CHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_OP(Check_LE, <=, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_CHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_OP(Check_LT, <, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_CHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_OP(Check_GE, >=, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_CHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_OP(Check_GT, >, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OP(Check_EQ, ==, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OP(Check_NE, !=, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OP(Check_LE, <=, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OP(Check_LT, <, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OP(Check_GE, >=, val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OP(Check_GT, >, val1, val1_text, val2, val2_text)
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_EQ_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_NE_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_LE_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_LT_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_GE_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_CHECK_GT_IMPL(val1, val1_text, val2, val2_text)
+#else  // ndef NDEBUG
+#define ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
+#define ABSL_LOG_INTERNAL_DCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
+#define ABSL_LOG_INTERNAL_DCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
+#define ABSL_LOG_INTERNAL_DCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
+#define ABSL_LOG_INTERNAL_DCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
+#define ABSL_LOG_INTERNAL_DCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
+#endif  // def NDEBUG
+
+// CHECK_OK
+#define ABSL_LOG_INTERNAL_CHECK_OK_IMPL(status, status_text) \
+  ABSL_LOG_INTERNAL_CHECK_OK(status, status_text)
+#define ABSL_LOG_INTERNAL_QCHECK_OK_IMPL(status, status_text) \
+  ABSL_LOG_INTERNAL_QCHECK_OK(status, status_text)
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DCHECK_OK_IMPL(status, status_text) \
+  ABSL_LOG_INTERNAL_CHECK_OK(status, status_text)
+#else
+#define ABSL_LOG_INTERNAL_DCHECK_OK_IMPL(status, status_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(status, nullptr)
+#endif
+
+// CHECK_STREQ
+#define ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STROP(strcmp, ==, true, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STROP(strcmp, !=, false, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STROP(strcasecmp, ==, true, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STROP(strcasecmp, !=, false, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_STROP(strcmp, ==, true, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_STROP(strcmp, !=, false, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, ==, true, s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, !=, false, s1, s1_text, s2,  \
+                                 s2_text)
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text)
+#else  // ndef NDEBUG
+#define ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
+#define ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+  ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
+#endif  // def NDEBUG
+
+#endif  // ABSL_LOG_INTERNAL_CHECK_IMPL_H_
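
These `_IMPL` macros are the backing layer for the public `CHECK`/`QCHECK`/`DCHECK` family, expected to be exposed via absl/log/check.h elsewhere in this change. A hedged usage sketch of the public macros:

    #include "absl/log/check.h"

    void Process(const int* p, int expected) {
      CHECK(p != nullptr) << "null input";
      CHECK_EQ(*p, expected) << "unexpected value";
      // Under NDEBUG this expands to the parse-only NOP form above, so the
      // operands are still type-checked but never evaluated at run time.
      DCHECK_GE(*p, 0);
    }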
diff --git a/abseil-cpp/absl/log/internal/check_op.cc b/abseil-cpp/absl/log/internal/check_op.cc
new file mode 100644
index 0000000..f4b6764
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/check_op.cc
@@ -0,0 +1,118 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/check_op.h"
+
+#include <string.h>
+
+#ifdef _MSC_VER
+#define strcasecmp _stricmp
+#else
+#include <strings.h>  // for strcasecmp, but msvc does not have this header
+#endif
+
+#include <sstream>
+#include <string>
+
+#include "absl/base/config.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+#define ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(x) \
+  template std::string* MakeCheckOpString(x, x, const char*)
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(bool);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(int64_t);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(uint64_t);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(float);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(double);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(char);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(unsigned char);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const std::string&);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const absl::string_view&);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const char*);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const signed char*);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const unsigned char*);
+ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const void*);
+#undef ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING
+
+CheckOpMessageBuilder::CheckOpMessageBuilder(const char* exprtext) {
+  stream_ << exprtext << " (";
+}
+
+std::ostream& CheckOpMessageBuilder::ForVar2() {
+  stream_ << " vs. ";
+  return stream_;
+}
+
+std::string* CheckOpMessageBuilder::NewString() {
+  stream_ << ")";
+  return new std::string(stream_.str());
+}
+
+void MakeCheckOpValueString(std::ostream& os, const char v) {
+  if (v >= 32 && v <= 126) {
+    os << "'" << v << "'";
+  } else {
+    os << "char value " << int{v};
+  }
+}
+
+void MakeCheckOpValueString(std::ostream& os, const signed char v) {
+  if (v >= 32 && v <= 126) {
+    os << "'" << v << "'";
+  } else {
+    os << "signed char value " << int{v};
+  }
+}
+
+void MakeCheckOpValueString(std::ostream& os, const unsigned char v) {
+  if (v >= 32 && v <= 126) {
+    os << "'" << v << "'";
+  } else {
+    os << "unsigned char value " << int{v};
+  }
+}
+
+void MakeCheckOpValueString(std::ostream& os, const void* p) {
+  if (p == nullptr) {
+    os << "(null)";
+  } else {
+    os << p;
+  }
+}
+
+// Helper functions for string comparisons.
+#define DEFINE_CHECK_STROP_IMPL(name, func, expected)                      \
+  std::string* Check##func##expected##Impl(const char* s1, const char* s2, \
+                                           const char* exprtext) {         \
+    bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2));                  \
+    if (equal == expected) {                                               \
+      return nullptr;                                                      \
+    } else {                                                               \
+      return new std::string(                                              \
+          absl::StrCat(exprtext, " (", s1, " vs. ", s2, ")"));             \
+    }                                                                      \
+  }
+DEFINE_CHECK_STROP_IMPL(CHECK_STREQ, strcmp, true)
+DEFINE_CHECK_STROP_IMPL(CHECK_STRNE, strcmp, false)
+DEFINE_CHECK_STROP_IMPL(CHECK_STRCASEEQ, strcasecmp, true)
+DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false)
+#undef DEFINE_CHECK_STROP_IMPL
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
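
The builder above produces failure messages of the form `exprtext (v1 vs. v2)`. A small sketch using the int64_t instantiation defined in this file (internal API, shown purely for illustration):

    #include <cstdint>
    #include <memory>
    #include <string>

    #include "absl/log/internal/check_op.h"

    std::string DescribeFailure() {
      // For CHECK_EQ(x, y) with x == 1 and y == 2 this yields "x == y (1 vs. 2)".
      std::unique_ptr<std::string> msg(
          absl::log_internal::MakeCheckOpString<int64_t, int64_t>(1, 2,
                                                                  "x == y"));
      return *msg;
    }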
diff --git a/abseil-cpp/absl/log/internal/check_op.h b/abseil-cpp/absl/log/internal/check_op.h
new file mode 100644
index 0000000..20b01b5
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/check_op.h
@@ -0,0 +1,394 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/check_op.h
+// -----------------------------------------------------------------------------
+//
+// This file declares helper routines and macros used to implement `CHECK`
+// macros.
+
+#ifndef ABSL_LOG_INTERNAL_CHECK_OP_H_
+#define ABSL_LOG_INTERNAL_CHECK_OP_H_
+
+#include <stdint.h>
+
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/log/internal/nullguard.h"
+#include "absl/log/internal/nullstream.h"
+#include "absl/log/internal/strip.h"
+
+// `ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL` wraps string literals that
+// should be stripped when `ABSL_MIN_LOG_LEVEL` exceeds `kFatal`.
+#ifdef ABSL_MIN_LOG_LEVEL
+#define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal)         \
+  (::absl::LogSeverity::kFatal >=                               \
+           static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \
+       ? (literal)                                              \
+       : "")
+#else
+#define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal) (literal)
+#endif
+
+#ifdef NDEBUG
+// `NDEBUG` is defined, so `DCHECK_EQ(x, y)` and so on do nothing.  However, we
+// still want the compiler to parse `x` and `y`, because we don't want to lose
+// potentially useful errors and warnings.
+#define ABSL_LOG_INTERNAL_DCHECK_NOP(x, y)   \
+  while (false && ((void)(x), (void)(y), 0)) \
+  ::absl::log_internal::NullStream().InternalStream()
+#endif
+
+#define ABSL_LOG_INTERNAL_CHECK_OP(name, op, val1, val1_text, val2, val2_text) \
+  while (                                                                      \
+      ::std::string* absl_log_internal_check_op_result ABSL_ATTRIBUTE_UNUSED = \
+          ::absl::log_internal::name##Impl(                                    \
+              ::absl::log_internal::GetReferenceableValue(val1),               \
+              ::absl::log_internal::GetReferenceableValue(val2),               \
+              ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val1_text                 \
+                                                     " " #op " " val2_text)))  \
+  ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_op_result).InternalStream()
+#define ABSL_LOG_INTERNAL_QCHECK_OP(name, op, val1, val1_text, val2, \
+                                    val2_text)                       \
+  while (::std::string* absl_log_internal_qcheck_op_result =         \
+             ::absl::log_internal::name##Impl(                       \
+                 ::absl::log_internal::GetReferenceableValue(val1),  \
+                 ::absl::log_internal::GetReferenceableValue(val2),  \
+                 ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(             \
+                     val1_text " " #op " " val2_text)))              \
+  ABSL_LOG_INTERNAL_QCHECK(*absl_log_internal_qcheck_op_result).InternalStream()
+#define ABSL_LOG_INTERNAL_CHECK_STROP(func, op, expected, s1, s1_text, s2,     \
+                                      s2_text)                                 \
+  while (::std::string* absl_log_internal_check_strop_result =                 \
+             ::absl::log_internal::Check##func##expected##Impl(                \
+                 (s1), (s2),                                                   \
+                 ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(s1_text " " #op        \
+                                                                " " s2_text))) \
+  ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_strop_result)               \
+      .InternalStream()
+#define ABSL_LOG_INTERNAL_QCHECK_STROP(func, op, expected, s1, s1_text, s2,    \
+                                       s2_text)                                \
+  while (::std::string* absl_log_internal_qcheck_strop_result =                \
+             ::absl::log_internal::Check##func##expected##Impl(                \
+                 (s1), (s2),                                                   \
+                 ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(s1_text " " #op        \
+                                                                " " s2_text))) \
+  ABSL_LOG_INTERNAL_QCHECK(*absl_log_internal_qcheck_strop_result)             \
+      .InternalStream()
+// This one is tricky:
+// * We must evaluate `val` exactly once, yet we need to do two things with it:
+//   evaluate `.ok()` and (sometimes) `.ToString()`.
+// * `val` might be an `absl::Status` or some `absl::StatusOr<T>`.
+// * `val` might be e.g. `ATemporary().GetStatus()`, which may return a
+//   reference to a member of `ATemporary` that is only valid until the end of
+//   the full expression.
+// * We don't want this file to depend on `absl::Status` `#include`s or linkage,
+//   nor do we want to move the definition to status and introduce a dependency
+//   in the other direction.  We can be assured that callers must already have a
+//   `Status` and the necessary `#include`s and linkage.
+// * Callsites should be small and fast (at least when `val.ok()`): one branch,
+//   minimal stack footprint.
+//   * In particular, the string concatenation should be out-of-line and
+//     emitted in only one TU to save linker input size.
+// * We want the `val.ok()` check inline so static analyzers and optimizers can
+//   see it.
+// * As usual, no braces so we can stream into the expansion with `operator<<`.
+// * Also as usual, it must expand to a single (partial) statement with no
+//   ambiguous-else problems.
+#define ABSL_LOG_INTERNAL_CHECK_OK(val, val_text)                        \
+  for (::std::pair<const ::absl::Status*, ::std::string*>                \
+           absl_log_internal_check_ok_goo;                               \
+       absl_log_internal_check_ok_goo.first =                            \
+           ::absl::log_internal::AsStatus(val),                          \
+       absl_log_internal_check_ok_goo.second =                           \
+           ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok()) \
+               ? nullptr                                                 \
+               : ::absl::status_internal::MakeCheckFailString(           \
+                     absl_log_internal_check_ok_goo.first,               \
+                     ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val_text     \
+                                                            " is OK")),  \
+       !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());)  \
+  ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_ok_goo.second)        \
+      .InternalStream()
+#define ABSL_LOG_INTERNAL_QCHECK_OK(val, val_text)                       \
+  for (::std::pair<const ::absl::Status*, ::std::string*>                \
+           absl_log_internal_check_ok_goo;                               \
+       absl_log_internal_check_ok_goo.first =                            \
+           ::absl::log_internal::AsStatus(val),                          \
+       absl_log_internal_check_ok_goo.second =                           \
+           ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok()) \
+               ? nullptr                                                 \
+               : ::absl::status_internal::MakeCheckFailString(           \
+                     absl_log_internal_check_ok_goo.first,               \
+                     ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val_text     \
+                                                            " is OK")),  \
+       !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());)  \
+  ABSL_LOG_INTERNAL_QCHECK(*absl_log_internal_check_ok_goo.second)       \
+      .InternalStream()
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class Status;
+template <typename T>
+class StatusOr;
+
+namespace status_internal {
+std::string* MakeCheckFailString(const absl::Status* status,
+                                 const char* prefix);
+}  // namespace status_internal
+
+namespace log_internal {
+
+// Convert a Status or a StatusOr to its underlying status value.
+//
+// (This implementation does not require a dep on absl::Status to work.)
+inline const absl::Status* AsStatus(const absl::Status& s) { return &s; }
+template <typename T>
+const absl::Status* AsStatus(const absl::StatusOr<T>& s) {
+  return &s.status();
+}
+
+// A helper class for formatting `expr (V1 vs. V2)` in a `CHECK_XX` statement.
+// See `MakeCheckOpString` for sample usage.
+class CheckOpMessageBuilder final {
+ public:
+  // Inserts `exprtext` and ` (` to the stream.
+  explicit CheckOpMessageBuilder(const char* exprtext);
+  ~CheckOpMessageBuilder() = default;
+  // For inserting the first variable.
+  std::ostream& ForVar1() { return stream_; }
+  // For inserting the second variable (adds an intermediate ` vs. `).
+  std::ostream& ForVar2();
+  // Get the result (inserts the closing `)`).
+  std::string* NewString();
+
+ private:
+  std::ostringstream stream_;
+};
+
+// This formats a value for a failing `CHECK_XX` statement.  Ordinarily, it uses
+// the definition for `operator<<`, with a few special cases below.
+template <typename T>
+inline void MakeCheckOpValueString(std::ostream& os, const T& v) {
+  os << log_internal::NullGuard<T>::Guard(v);
+}
+
+// Overloads for char types provide readable values for unprintable characters.
+void MakeCheckOpValueString(std::ostream& os, char v);
+void MakeCheckOpValueString(std::ostream& os, signed char v);
+void MakeCheckOpValueString(std::ostream& os, unsigned char v);
+void MakeCheckOpValueString(std::ostream& os, const void* p);
+
+namespace detect_specialization {
+
+// MakeCheckOpString is being specialized for every T and U pair that is being
+// passed to the CHECK_op macros. However, there is a lot of redundancy in these
+// specializations that creates unnecessary library and binary bloat.
+// The number of instantiations tends to be O(n^2) because we have two
+// independent inputs. This technique works by reducing `n`.
+//
+// Most user-defined types being passed to CHECK_op end up being printed as a
+// builtin type. For example, enums tend to be implicitly converted to their
+// underlying type when calling operator<<, and pointers are printed with the
+// `const void*` overload.
+// To reduce the number of instantiations we coerce these values before calling
+// MakeCheckOpString instead of inside it.
+//
+// To detect if this coercion is needed, we duplicate all the relevant
+// operator<< overloads as specified in the standard, just in a different
+// namespace. If the call to `stream << value` becomes ambiguous, it means that
+// one of these overloads is the one selected by overload resolution. We then
+// do overload resolution again just with our overload set to see which one gets
+// selected. That tells us which type to coerce to.
+// If the augmented call was not ambiguous, it means that none of these were
+// selected and we can't coerce the input.
+//
+// As a secondary step to reduce code duplication, we promote integral types to
+// their 64-bit variant. This does not change the printed value, but reduces the
+// number of instantiations even further. Promoting an integer is very cheap at
+// the call site.
+int64_t operator<<(std::ostream&, short value);           // NOLINT
+int64_t operator<<(std::ostream&, unsigned short value);  // NOLINT
+int64_t operator<<(std::ostream&, int value);
+int64_t operator<<(std::ostream&, unsigned int value);
+int64_t operator<<(std::ostream&, long value);                 // NOLINT
+uint64_t operator<<(std::ostream&, unsigned long value);       // NOLINT
+int64_t operator<<(std::ostream&, long long value);            // NOLINT
+uint64_t operator<<(std::ostream&, unsigned long long value);  // NOLINT
+float operator<<(std::ostream&, float value);
+double operator<<(std::ostream&, double value);
+long double operator<<(std::ostream&, long double value);
+bool operator<<(std::ostream&, bool value);
+const void* operator<<(std::ostream&, const void* value);
+const void* operator<<(std::ostream&, std::nullptr_t);
+
+// These `char` overloads are specified like this in the standard, so we have to
+// write them exactly the same to ensure the call is ambiguous.
+// If we wrote it in a different way (eg taking std::ostream instead of the
+// template) then one call might have a higher rank than the other and it would
+// not be ambiguous.
+template <typename Traits>
+char operator<<(std::basic_ostream<char, Traits>&, char);
+template <typename Traits>
+signed char operator<<(std::basic_ostream<char, Traits>&, signed char);
+template <typename Traits>
+unsigned char operator<<(std::basic_ostream<char, Traits>&, unsigned char);
+template <typename Traits>
+const char* operator<<(std::basic_ostream<char, Traits>&, const char*);
+template <typename Traits>
+const signed char* operator<<(std::basic_ostream<char, Traits>&,
+                              const signed char*);
+template <typename Traits>
+const unsigned char* operator<<(std::basic_ostream<char, Traits>&,
+                                const unsigned char*);
+
+// This overload triggers when the call is not ambiguous.
+// It means that T is being printed with some overload not on this list.
+// We keep the value as `const T&`.
+template <typename T, typename = decltype(std::declval<std::ostream&>()
+                                          << std::declval<const T&>())>
+const T& Detect(int);
+
+// This overload triggers when the call is ambiguous.
+// It means that T is either one from this list or printed as one from this
+// list.  E.g. an enum that decays to `int` for printing.
+// We ask the overload set to give us the type we want to convert it to.
+template <typename T>
+decltype(detect_specialization::operator<<(std::declval<std::ostream&>(),
+                                           std::declval<const T&>()))
+Detect(char);
+
+}  // namespace detect_specialization
+
+template <typename T>
+using CheckOpStreamType = decltype(detect_specialization::Detect<T>(0));
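+
+// A few illustrative resolutions (a sketch; `MyEnum` is a hypothetical
+// unscoped enum, not part of this header):
+//   CheckOpStreamType<int>          // int64_t (promoted via the overloads above)
+//   CheckOpStreamType<MyEnum>       // int64_t (printed as its underlying integer)
+//   CheckOpStreamType<int*>         // const void*
+//   CheckOpStreamType<std::string>  // const std::string& (no coercion; kept by reference)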
+
+// Build the error message string.  Specify no inlining for code size.
+template <typename T1, typename T2>
+ABSL_ATTRIBUTE_RETURNS_NONNULL std::string* MakeCheckOpString(
+    T1 v1, T2 v2, const char* exprtext) ABSL_ATTRIBUTE_NOINLINE;
+
+template <typename T1, typename T2>
+std::string* MakeCheckOpString(T1 v1, T2 v2, const char* exprtext) {
+  CheckOpMessageBuilder comb(exprtext);
+  MakeCheckOpValueString(comb.ForVar1(), v1);
+  MakeCheckOpValueString(comb.ForVar2(), v2);
+  return comb.NewString();
+}
+
+// Add a few commonly used instantiations as extern to reduce the size of
+// object files.
+#define ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(x) \
+  extern template std::string* MakeCheckOpString(x, x, const char*)
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(bool);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(int64_t);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(uint64_t);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(float);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(double);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(char);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(unsigned char);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const std::string&);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const absl::string_view&);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const signed char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const unsigned char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*);
+#undef ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN
+
+// Helper functions for `ABSL_LOG_INTERNAL_CHECK_OP` macro family.  The
+// `(int, int)` overload works around the issue that the compiler will not
+// instantiate the template version of the function on values of unnamed enum
+// type.
+#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op)                        \
+  template <typename T1, typename T2>                                    \
+  inline constexpr ::std::string* name##Impl(const T1& v1, const T2& v2, \
+                                             const char* exprtext) {     \
+    using U1 = CheckOpStreamType<T1>;                                    \
+    using U2 = CheckOpStreamType<T2>;                                    \
+    return ABSL_PREDICT_TRUE(v1 op v2)                                   \
+               ? nullptr                                                 \
+               : MakeCheckOpString<U1, U2>(v1, v2, exprtext);            \
+  }                                                                      \
+  inline constexpr ::std::string* name##Impl(int v1, int v2,             \
+                                             const char* exprtext) {     \
+    return name##Impl<int, int>(v1, v2, exprtext);                       \
+  }
+
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_EQ, ==)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_NE, !=)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_LE, <=)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_LT, <)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_GE, >=)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_GT, >)
+#undef ABSL_LOG_INTERNAL_CHECK_OP_IMPL
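+
+// Usage sketch (hypothetical values, simplified): a passing comparison yields
+// nullptr, while a failing one allocates a message built from the expression
+// text and both operands:
+//
+//   std::string* failure = Check_EQImpl(2 + 2, 5, "2 + 2 == 5");  // non-null
+//   std::string* ok = Check_EQImpl(4, 4, "4 == 4");               // nullptr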
+
+std::string* CheckstrcmptrueImpl(const char* s1, const char* s2,
+                                 const char* exprtext);
+std::string* CheckstrcmpfalseImpl(const char* s1, const char* s2,
+                                  const char* exprtext);
+std::string* CheckstrcasecmptrueImpl(const char* s1, const char* s2,
+                                     const char* exprtext);
+std::string* CheckstrcasecmpfalseImpl(const char* s1, const char* s2,
+                                      const char* exprtext);
+
+// `CHECK_EQ` and friends want to pass their arguments by reference, however
+// this winds up exposing lots of cases where people have declared and
+// initialized static const data members in-class but never defined them (i.e.
+// in a .cc file), meaning they are not referenceable.  This function avoids
+// that problem for integers (the most common cases) by overloading for every
+// primitive integer type, even the ones we discourage, and returning them by
+// value.
+template <typename T>
+inline constexpr const T& GetReferenceableValue(const T& t) {
+  return t;
+}
+inline constexpr char GetReferenceableValue(char t) { return t; }
+inline constexpr unsigned char GetReferenceableValue(unsigned char t) {
+  return t;
+}
+inline constexpr signed char GetReferenceableValue(signed char t) { return t; }
+inline constexpr short GetReferenceableValue(short t) { return t; }  // NOLINT
+inline constexpr unsigned short GetReferenceableValue(               // NOLINT
+    unsigned short t) {                                              // NOLINT
+  return t;
+}
+inline constexpr int GetReferenceableValue(int t) { return t; }
+inline constexpr unsigned int GetReferenceableValue(unsigned int t) {
+  return t;
+}
+inline constexpr long GetReferenceableValue(long t) { return t; }  // NOLINT
+inline constexpr unsigned long GetReferenceableValue(              // NOLINT
+    unsigned long t) {                                             // NOLINT
+  return t;
+}
+inline constexpr long long GetReferenceableValue(long long t) {  // NOLINT
+  return t;
+}
+inline constexpr unsigned long long GetReferenceableValue(  // NOLINT
+    unsigned long long t) {                                 // NOLINT
+  return t;
+}
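+
+// Illustration of the problem the overloads above avoid (hypothetical class,
+// not part of this header):
+//
+//   struct Limits {
+//     static const int kMax = 5;  // initialized in the header but never
+//   };                            // defined in any .cc file
+//
+//   CHECK_EQ(x, Limits::kMax);  // Binding kMax to `const int&` would require
+//                               // an out-of-line definition; taking it by
+//                               // value via GetReferenceableValue() does not.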
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_CHECK_OP_H_
diff --git a/abseil-cpp/absl/log/internal/conditions.cc b/abseil-cpp/absl/log/internal/conditions.cc
new file mode 100644
index 0000000..a9f4966
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/conditions.cc
@@ -0,0 +1,83 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/conditions.h"
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/cycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+namespace {
+
+// The following code behaves like AtomicStatsCounter::LossyAdd() for
+// speed since it is fine to lose occasional updates.
+// Returns old value of *counter.
+uint32_t LossyIncrement(std::atomic<uint32_t>* counter) {
+  const uint32_t value = counter->load(std::memory_order_relaxed);
+  counter->store(value + 1, std::memory_order_relaxed);
+  return value;
+}
+
+}  // namespace
+
+bool LogEveryNState::ShouldLog(int n) {
+  return n > 0 && (LossyIncrement(&counter_) % static_cast<uint32_t>(n)) == 0;
+}
+
+bool LogFirstNState::ShouldLog(int n) {
+  const uint32_t counter_value = counter_.load(std::memory_order_relaxed);
+  if (static_cast<int64_t>(counter_value) < n) {
+    counter_.store(counter_value + 1, std::memory_order_relaxed);
+    return true;
+  }
+  return false;
+}
+
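+// Worked example for the power-of-two test below: on the 8th call new_value is
+// 8 (0b1000) and 8 & 7 == 0, so the statement logs; on the 9th call
+// 9 & 8 == 0b1000 != 0, so it does not.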
+bool LogEveryPow2State::ShouldLog() {
+  const uint32_t new_value = LossyIncrement(&counter_) + 1;
+  return (new_value & (new_value - 1)) == 0;
+}
+
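+// Worked example for the function below: with seconds == 5 and
+// CycleClock::Frequency() == 1e9, a successful call advances
+// next_log_time_cycles_ roughly 5e9 cycles into the future; until then every
+// call returns false.  When several threads race past the deadline, the
+// compare-exchange lets only the winner log (except on myriad2, which
+// tolerates over-logging).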
+bool LogEveryNSecState::ShouldLog(double seconds) {
+  using absl::base_internal::CycleClock;
+  LossyIncrement(&counter_);
+  const int64_t now_cycles = CycleClock::Now();
+  int64_t next_cycles = next_log_time_cycles_.load(std::memory_order_relaxed);
+#if defined(__myriad2__)
+  // myriad2 does not have 8-byte compare and exchange.  Use a racy version that
+  // is "good enough" but will over-log in the face of concurrent logging.
+  if (now_cycles > next_cycles) {
+    next_log_time_cycles_.store(now_cycles + seconds * CycleClock::Frequency(),
+                                std::memory_order_relaxed);
+    return true;
+  }
+  return false;
+#else
+  do {
+    if (now_cycles <= next_cycles) return false;
+  } while (!next_log_time_cycles_.compare_exchange_weak(
+      next_cycles, now_cycles + seconds * CycleClock::Frequency(),
+      std::memory_order_relaxed, std::memory_order_relaxed));
+  return true;
+#endif
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/conditions.h b/abseil-cpp/absl/log/internal/conditions.h
new file mode 100644
index 0000000..f576d65
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/conditions.h
@@ -0,0 +1,228 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/conditions.h
+// -----------------------------------------------------------------------------
+//
+// This file contains the implementation of conditional log statements, like
+// LOG_IF, including all the ABSL_LOG_INTERNAL_..._CONDITION_... macros and
+// various condition classes like LogEveryNState.
+
+#ifndef ABSL_LOG_INTERNAL_CONDITIONS_H_
+#define ABSL_LOG_INTERNAL_CONDITIONS_H_
+
+#if defined(_WIN32) || defined(__hexagon__)
+#include <cstdlib>
+#else
+#include <unistd.h>
+#endif
+#include <stdlib.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/log/internal/voidify.h"
+
+// `ABSL_LOG_INTERNAL_STATELESS_CONDITION` prefixes another macro that expands
+// to a temporary `LogMessage` instantiation followed by zero or more streamed
+// expressions.  This definition is tricky to read correctly.  It evaluates to
+// either
+//
+//   (void)0;
+//
+// or
+//
+//   ::absl::log_internal::Voidify() &&
+//       ::absl::log_internal::LogMessage(...) << "the user's message";
+//
+// If the condition is evaluable at compile time, as is often the case, it
+// compiles away to just one side or the other.
+//
+// Although this is not used anywhere a statement (e.g. `if`) could not go,
+// the ternary expression does a better job avoiding spurious diagnostics
+// (dangling else, missing switch case) and preserving noreturn semantics (e.g.
+// on `LOG(FATAL)`) without requiring braces.
+//
+// The `switch` ensures that this expansion is the beginning of a statement (as
+// opposed to an expression) and prevents shenanigans like
+// `AFunction(LOG(INFO))` and `decltype(LOG(INFO))`.  The apparently-redundant
+// `default` case makes the condition more amenable to Clang dataflow analysis.
+#define ABSL_LOG_INTERNAL_STATELESS_CONDITION(condition) \
+  switch (0)                                             \
+  case 0:                                                \
+  default:                                               \
+    !(condition) ? (void)0 : ::absl::log_internal::Voidify()&&
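+
+// For example (a sketch; `cond` and `msg` are hypothetical):
+//
+//   ABSL_LOG_INTERNAL_STATELESS_CONDITION(cond) msg.InternalStream() << "hi";
+//
+// expands to roughly
+//
+//   switch (0) case 0: default:
+//     !(cond) ? (void)0
+//             : ::absl::log_internal::Voidify() && msg.InternalStream() << "hi";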
+
+// `ABSL_LOG_INTERNAL_STATEFUL_CONDITION` applies a condition like
+// `ABSL_LOG_INTERNAL_STATELESS_CONDITION` but adds a series of variable
+// declarations, including a local static object which stores the state needed
+// to implement the stateful macros like `LOG_EVERY_N`.
+//
+// `for`-loops are used to declare scoped variables without braces (to permit
+// streaming into the macro's expansion) and without the dangling-`else`
+// problems/diagnostics that come with `if`.
+//
+// Two more variables are declared in separate `for`-loops:
+//
+// * `COUNTER` implements a streamable token whose value when streamed is the
+//   number of times execution has passed through the macro.
+// * A boolean flag is used to prevent any of the `for`-loops from ever actually
+//   looping.
+#define ABSL_LOG_INTERNAL_STATEFUL_CONDITION(condition)             \
+  for (bool absl_log_internal_stateful_condition_do_log(condition); \
+       absl_log_internal_stateful_condition_do_log;                 \
+       absl_log_internal_stateful_condition_do_log = false)         \
+  ABSL_LOG_INTERNAL_STATEFUL_CONDITION_IMPL
+#define ABSL_LOG_INTERNAL_STATEFUL_CONDITION_IMPL(kind, ...)              \
+  for (static ::absl::log_internal::Log##kind##State                      \
+           absl_log_internal_stateful_condition_state;                    \
+       absl_log_internal_stateful_condition_do_log &&                     \
+       absl_log_internal_stateful_condition_state.ShouldLog(__VA_ARGS__); \
+       absl_log_internal_stateful_condition_do_log = false)               \
+    for (const uint32_t COUNTER ABSL_ATTRIBUTE_UNUSED =                   \
+             absl_log_internal_stateful_condition_state.counter();        \
+         absl_log_internal_stateful_condition_do_log;                     \
+         absl_log_internal_stateful_condition_do_log = false)
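+
+// For example (a sketch),
+//   ABSL_LOG_INTERNAL_STATEFUL_CONDITION(true)(EveryN, 10)
+// declares a function-local `static LogEveryNState` and runs the streamed
+// statement that follows only when `ShouldLog(10)` returns true, i.e. on the
+// 1st, 11th, 21st, ... pass; within that statement `COUNTER` names the number
+// of times execution has passed through the macro.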
+
+// `ABSL_LOG_INTERNAL_CONDITION_*` serve to combine any conditions from the
+// macro (e.g. `LOG_IF` or `VLOG`) with inherent conditions (e.g.
+// `ABSL_MIN_LOG_LEVEL`) into a single boolean expression.  We could chain
+// ternary operators instead, however some versions of Clang sometimes issue
+// spurious diagnostics after such expressions due to a control flow analysis
+// bug.
+#ifdef ABSL_MIN_LOG_LEVEL
+#define ABSL_LOG_INTERNAL_CONDITION_INFO(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(                   \
+      (condition) && ::absl::LogSeverity::kInfo >=        \
+                         static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL))
+#define ABSL_LOG_INTERNAL_CONDITION_WARNING(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(                      \
+      (condition) && ::absl::LogSeverity::kWarning >=        \
+                         static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL))
+#define ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(                    \
+      (condition) && ::absl::LogSeverity::kError >=        \
+                         static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL))
+// NOTE: Use ternary operators instead of short-circuiting to mitigate
+// https://bugs.llvm.org/show_bug.cgi?id=51928.
+#define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition)                 \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(                                    \
+      ((condition)                                                         \
+           ? (::absl::LogSeverity::kFatal >=                               \
+                      static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \
+                  ? true                                                   \
+                  : (::absl::log_internal::AbortQuietly(), false))         \
+           : false))
+// NOTE: Use ternary operators instead of short-circuiting to mitigate
+// https://bugs.llvm.org/show_bug.cgi?id=51928.
+#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition)                \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(                                    \
+      ((condition)                                                         \
+           ? (::absl::LogSeverity::kFatal >=                               \
+                      static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \
+                  ? true                                                   \
+                  : (::absl::log_internal::ExitQuietly(), false))          \
+           : false))
+
+#define ABSL_LOG_INTERNAL_CONDITION_LEVEL(severity)                    \
+  for (int log_internal_severity_loop = 1; log_internal_severity_loop; \
+       log_internal_severity_loop = 0)                                 \
+    for (const absl::LogSeverity log_internal_severity =               \
+             ::absl::NormalizeLogSeverity(severity);                   \
+         log_internal_severity_loop; log_internal_severity_loop = 0)   \
+  ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL
+#define ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL(type, condition)    \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(                            \
+      (condition) &&                                               \
+      (log_internal_severity >=                                    \
+           static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) || \
+       (log_internal_severity == ::absl::LogSeverity::kFatal &&    \
+        (::absl::log_internal::AbortQuietly(), false))))
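+
+// For example, building with -DABSL_MIN_LOG_LEVEL=2 (kError) makes the INFO
+// and WARNING conditions above constant false for plain `LOG()` statements, so
+// they compile away entirely, while ERROR and FATAL behave normally.  A value
+// above kFatal suppresses even FATAL messages, but the FATAL/QFATAL conditions
+// still call AbortQuietly()/ExitQuietly(), so the program still terminates.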
+#else  // ndef ABSL_MIN_LOG_LEVEL
+#define ABSL_LOG_INTERNAL_CONDITION_INFO(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#define ABSL_LOG_INTERNAL_CONDITION_WARNING(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#define ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#define ABSL_LOG_INTERNAL_CONDITION_LEVEL(severity)                    \
+  for (int log_internal_severity_loop = 1; log_internal_severity_loop; \
+       log_internal_severity_loop = 0)                                 \
+    for (const absl::LogSeverity log_internal_severity =               \
+             ::absl::NormalizeLogSeverity(severity);                   \
+         log_internal_severity_loop; log_internal_severity_loop = 0)   \
+  ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL
+#define ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL(type, condition) \
+  ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#endif  // ndef ABSL_MIN_LOG_LEVEL
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// Stateful condition class name should be "Log" + name + "State".
+class LogEveryNState final {
+ public:
+  bool ShouldLog(int n);
+  uint32_t counter() { return counter_.load(std::memory_order_relaxed); }
+
+ private:
+  std::atomic<uint32_t> counter_{0};
+};
+
+class LogFirstNState final {
+ public:
+  bool ShouldLog(int n);
+  uint32_t counter() { return counter_.load(std::memory_order_relaxed); }
+
+ private:
+  std::atomic<uint32_t> counter_{0};
+};
+
+class LogEveryPow2State final {
+ public:
+  bool ShouldLog();
+  uint32_t counter() { return counter_.load(std::memory_order_relaxed); }
+
+ private:
+  std::atomic<uint32_t> counter_{0};
+};
+
+class LogEveryNSecState final {
+ public:
+  bool ShouldLog(double seconds);
+  uint32_t counter() { return counter_.load(std::memory_order_relaxed); }
+
+ private:
+  std::atomic<uint32_t> counter_{0};
+  // Cycle count according to CycleClock that we should next log at.
+  std::atomic<int64_t> next_log_time_cycles_{0};
+};
+
+// Helper routines to abort the application quietly
+
+ABSL_ATTRIBUTE_NORETURN inline void AbortQuietly() { abort(); }
+ABSL_ATTRIBUTE_NORETURN inline void ExitQuietly() { _exit(1); }
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_CONDITIONS_H_
diff --git a/abseil-cpp/absl/log/internal/config.h b/abseil-cpp/absl/log/internal/config.h
new file mode 100644
index 0000000..379e9ab
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/config.h
@@ -0,0 +1,45 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/config.h
+// -----------------------------------------------------------------------------
+//
+
+#ifndef ABSL_LOG_INTERNAL_CONFIG_H_
+#define ABSL_LOG_INTERNAL_CONFIG_H_
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <cstdint>
+#else
+#include <sys/types.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+#ifdef _WIN32
+using Tid = uint32_t;
+#else
+using Tid = pid_t;
+#endif
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_CONFIG_H_
diff --git a/abseil-cpp/absl/log/internal/flags.h b/abseil-cpp/absl/log/internal/flags.h
new file mode 100644
index 0000000..0c5e81e
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/flags.h
@@ -0,0 +1,53 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/flags.h
+// -----------------------------------------------------------------------------
+//
+// This header declares the set of flags which can be used to configure the
+// Abseil logging library's behavior at runtime.
+
+#ifndef ABSL_LOG_INTERNAL_FLAGS_H_
+#define ABSL_LOG_INTERNAL_FLAGS_H_
+
+#include <string>
+
+#include "absl/flags/declare.h"
+
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// These flags should not be used in C++ code to access logging library
+// configuration knobs. Use interfaces defined in absl/log/globals.h
+// instead. It is still ok to use these flags on a command line.
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+// Log messages at this severity or above are sent to stderr in *addition* to
+// logfiles.  Defaults to `ERROR`.  See log_severity.h for numeric values of
+// severity levels.
+ABSL_DECLARE_FLAG(int, stderrthreshold);
+
+// Log messages at this severity or above are logged; others are discarded.
+// Defaults to `INFO`, i.e. log all severities.  See log_severity.h for numeric
+// values of severity levels.
+ABSL_DECLARE_FLAG(int, minloglevel);
+
+// If specified in the form file:linenum, any messages logged from a matching
+// location will also include a backtrace.
+ABSL_DECLARE_FLAG(std::string, log_backtrace_at);
+
+// If true, the log prefix (severity, date, time, PID, etc.) is prepended to
+// each message logged. Defaults to true.
+ABSL_DECLARE_FLAG(bool, log_prefix);
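+
+// For example, a binary that links these flag definitions can be invoked as
+// (binary name illustrative):
+//
+//   ./server --minloglevel=1 --stderrthreshold=0 --log_prefix=false
+//
+// to discard INFO messages, mirror the rest to stderr, and omit the prefix.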
+
+#endif  // ABSL_LOG_INTERNAL_FLAGS_H_
diff --git a/abseil-cpp/absl/log/internal/globals.cc b/abseil-cpp/absl/log/internal/globals.cc
new file mode 100644
index 0000000..359858f
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/globals.cc
@@ -0,0 +1,145 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/globals.h"
+
+#include <atomic>
+#include <cstdio>
+
+#if defined(__EMSCRIPTEN__)
+#include <emscripten/console.h>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/log_severity.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/strip.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+namespace {
+// Keeps track of whether Logging initialization is finalized.
+// Log messages generated before that will go to stderr.
+ABSL_CONST_INIT std::atomic<bool> logging_initialized(false);
+
+// The TimeZone used for logging. This may only be set once.
+ABSL_CONST_INIT std::atomic<absl::TimeZone*> timezone_ptr{nullptr};
+
+// If true, the logging library will symbolize stack traces in fatal messages.
+ABSL_CONST_INIT std::atomic<bool> symbolize_stack_trace(true);
+
+// Specifies maximum number of stack frames to report in fatal messages.
+ABSL_CONST_INIT std::atomic<int> max_frames_in_stack_trace(64);
+
+ABSL_CONST_INIT std::atomic<bool> exit_on_dfatal(true);
+ABSL_CONST_INIT std::atomic<bool> suppress_sigabort_trace(false);
+}  // namespace
+
+bool IsInitialized() {
+  return logging_initialized.load(std::memory_order_acquire);
+}
+
+void SetInitialized() {
+  logging_initialized.store(true, std::memory_order_release);
+}
+
+void WriteToStderr(absl::string_view message, absl::LogSeverity severity) {
+  if (message.empty()) return;
+#if defined(__EMSCRIPTEN__)
+  // In WebAssembly, bypass filesystem emulation via fwrite.
+  // Skip a trailing newline character as emscripten_errn adds one itself.
+  const auto message_minus_newline = absl::StripSuffix(message, "\n");
+  // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+  // until 3.1.43.
+#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+  emscripten_errn(message_minus_newline.data(), message_minus_newline.size());
+#else
+  std::string null_terminated_message(message_minus_newline);
+  _emscripten_err(null_terminated_message.c_str());
+#endif
+#else
+  // Avoid using std::cerr from this module since we may get called during
+  // exit code, and cerr may be partially or fully destroyed by then.
+  std::fwrite(message.data(), message.size(), 1, stderr);
+#endif
+
+#if defined(_WIN64) || defined(_WIN32) || defined(_WIN16)
+  // C99 requires stderr to not be fully-buffered by default (7.19.3.7), but
+  // MS CRT buffers it anyway, so we must `fflush` to ensure the string hits
+  // the console/file before the program dies (and takes the libc buffers
+  // with it).
+  // https://docs.microsoft.com/en-us/cpp/c-runtime-library/stream-i-o
+  if (severity >= absl::LogSeverity::kWarning) {
+    std::fflush(stderr);
+  }
+#else
+  // Avoid unused parameter warning in this branch.
+  (void)severity;
+#endif
+}
+
+void SetTimeZone(absl::TimeZone tz) {
+  absl::TimeZone* expected = nullptr;
+  absl::TimeZone* new_tz = new absl::TimeZone(tz);
+  // timezone_ptr can only be set once, otherwise new_tz is leaked.
+  if (!timezone_ptr.compare_exchange_strong(expected, new_tz,
+                                            std::memory_order_release,
+                                            std::memory_order_relaxed)) {
+    ABSL_RAW_LOG(FATAL,
+                 "absl::log_internal::SetTimeZone() has already been called");
+  }
+}
+
+const absl::TimeZone* TimeZone() {
+  return timezone_ptr.load(std::memory_order_acquire);
+}
+
+bool ShouldSymbolizeLogStackTrace() {
+  return symbolize_stack_trace.load(std::memory_order_acquire);
+}
+
+void EnableSymbolizeLogStackTrace(bool on_off) {
+  symbolize_stack_trace.store(on_off, std::memory_order_release);
+}
+
+int MaxFramesInLogStackTrace() {
+  return max_frames_in_stack_trace.load(std::memory_order_acquire);
+}
+
+void SetMaxFramesInLogStackTrace(int max_num_frames) {
+  max_frames_in_stack_trace.store(max_num_frames, std::memory_order_release);
+}
+
+bool ExitOnDFatal() { return exit_on_dfatal.load(std::memory_order_acquire); }
+
+void SetExitOnDFatal(bool on_off) {
+  exit_on_dfatal.store(on_off, std::memory_order_release);
+}
+
+bool SuppressSigabortTrace() {
+  return suppress_sigabort_trace.load(std::memory_order_acquire);
+}
+
+bool SetSuppressSigabortTrace(bool on_off) {
+  return suppress_sigabort_trace.exchange(on_off);
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/globals.h b/abseil-cpp/absl/log/internal/globals.h
new file mode 100644
index 0000000..27bc0d0
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/globals.h
@@ -0,0 +1,101 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/globals.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains various global objects and static helper routines
+// used in the logging implementation.
+
+#ifndef ABSL_LOG_INTERNAL_GLOBALS_H_
+#define ABSL_LOG_INTERNAL_GLOBALS_H_
+
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// IsInitialized returns true if the logging library is initialized.
+// This function is async-signal-safe.
+bool IsInitialized();
+
+// SetInitialized is called once after logging initialization is done.
+void SetInitialized();
+
+// Unconditionally write a `message` to stderr. If `severity` exceeds kInfo
+// we also flush the stderr stream.
+void WriteToStderr(absl::string_view message, absl::LogSeverity severity);
+
+// Set the TimeZone used for human-friendly times (for example, the log message
+// prefix) printed by the logging library. This may only be called once.
+void SetTimeZone(absl::TimeZone tz);
+
+// Returns the TimeZone used for human-friendly times (for example, the log
+// message prefix) printed by the logging library.  Returns nullptr prior to
+// initialization.
+const absl::TimeZone* TimeZone();
+
+// Returns true if stack traces emitted by the logging library should be
+// symbolized. This function is async-signal-safe.
+bool ShouldSymbolizeLogStackTrace();
+
+// Enables or disables symbolization of stack traces emitted by the
+// logging library. This function is async-signal-safe.
+void EnableSymbolizeLogStackTrace(bool on_off);
+
+// Returns the maximum number of frames that appear in stack traces
+// emitted by the logging library. This function is async-signal-safe.
+int MaxFramesInLogStackTrace();
+
+// Sets the maximum number of frames that appear in stack traces emitted by
+// the logging library. This function is async-signal-safe.
+void SetMaxFramesInLogStackTrace(int max_num_frames);
+
+// Determines whether we exit the program for a LOG(DFATAL) message in
+// debug mode.  It does this by skipping the call to Fail/FailQuietly.
+// This is intended for testing only.
+//
+// This can have some effects on LOG(FATAL) as well. Failure messages
+// are always allocated (rather than sharing a buffer), the crash
+// reason is not recorded, the "gwq" status message is not updated,
+// and the stack trace is not recorded.  The LOG(FATAL) *will* still
+// exit the program. Since this function is used only in testing,
+// these differences are acceptable.
+//
+// Additionally, LOG(LEVEL(FATAL)) is indistinguishable from LOG(DFATAL) and
+// will not terminate the program if SetExitOnDFatal(false) has been called.
+bool ExitOnDFatal();
+
+// SetExitOnDFatal() sets the ExitOnDFatal() status
+void SetExitOnDFatal(bool on_off);
+
+// Determines if the logging library should suppress logging of stacktraces in
+// the `SIGABRT` handler, typically because we just logged a stacktrace as part
+// of `LOG(FATAL)` and are about to send ourselves a `SIGABRT` to end the
+// program.
+bool SuppressSigabortTrace();
+
+// Sets the SuppressSigabortTrace() status and returns the previous state.
+bool SetSuppressSigabortTrace(bool on_off);
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_GLOBALS_H_
diff --git a/abseil-cpp/absl/log/internal/log_format.cc b/abseil-cpp/absl/log/internal/log_format.cc
new file mode 100644
index 0000000..23cef88
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_format.cc
@@ -0,0 +1,205 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/log_format.h"
+
+#include <string.h>
+
+#ifdef _MSC_VER
+#include <winsock2.h>  // For timeval
+#else
+#include <sys/time.h>
+#endif
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/optimization.h"
+#include "absl/log/internal/append_truncated.h"
+#include "absl/log/internal/config.h"
+#include "absl/log/internal/globals.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/civil_time.h"
+#include "absl/time/time.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+namespace {
+
+// This templated function avoids compiler warnings about tautological
+// comparisons when log_internal::Tid is unsigned. It can be replaced with a
+// constexpr if once the minimum C++ version Abseil supports is C++17.
+template <typename T>
+inline std::enable_if_t<!std::is_signed<T>::value>
+PutLeadingWhitespace(T tid, char*& p) {
+  if (tid < 10) *p++ = ' ';
+  if (tid < 100) *p++ = ' ';
+  if (tid < 1000) *p++ = ' ';
+  if (tid < 10000) *p++ = ' ';
+  if (tid < 100000) *p++ = ' ';
+  if (tid < 1000000) *p++ = ' ';
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_signed<T>::value>
+PutLeadingWhitespace(T tid, char*& p) {
+  if (tid >= 0 && tid < 10) *p++ = ' ';
+  if (tid > -10 && tid < 100) *p++ = ' ';
+  if (tid > -100 && tid < 1000) *p++ = ' ';
+  if (tid > -1000 && tid < 10000) *p++ = ' ';
+  if (tid > -10000 && tid < 100000) *p++ = ' ';
+  if (tid > -100000 && tid < 1000000) *p++ = ' ';
+}
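+
+// The `if constexpr` alternative mentioned above would look roughly like this
+// (a sketch, usable once C++17 is the minimum supported version):
+//
+//   template <typename T>
+//   void PutLeadingWhitespace(T tid, char*& p) {
+//     if constexpr (std::is_signed<T>::value) {
+//       if (tid >= 0 && tid < 10) *p++ = ' ';
+//       // ... remaining signed-range checks ...
+//     } else {
+//       if (tid < 10) *p++ = ' ';
+//       // ... remaining unsigned-range checks ...
+//     }
+//   }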
+
+// The fields before the filename are all fixed-width except for the thread ID,
+// which is of bounded width.
+size_t FormatBoundedFields(absl::LogSeverity severity, absl::Time timestamp,
+                           log_internal::Tid tid, absl::Span<char>& buf) {
+  constexpr size_t kBoundedFieldsMaxLen =
+      sizeof("SMMDD HH:MM:SS.NNNNNN  ") +
+      (1 + std::numeric_limits<log_internal::Tid>::digits10 + 1) - sizeof("");
+  if (ABSL_PREDICT_FALSE(buf.size() < kBoundedFieldsMaxLen)) {
+    // We don't bother trying to truncate these fields if the buffer is too
+    // short (or almost too short) because it would require doing a lot more
+    // length checking (slow) and it should never happen.  A 15kB buffer should
+    // be enough for anyone.  Instead we mark `buf` full without writing
+    // anything.
+    buf.remove_suffix(buf.size());
+    return 0;
+  }
+
+  // We can't call absl::LocalTime(), localtime_r(), or anything else here that
+  // isn't async-signal-safe. We can only use the time zone if it has already
+  // been loaded.
+  const absl::TimeZone* tz = absl::log_internal::TimeZone();
+  if (ABSL_PREDICT_FALSE(tz == nullptr)) {
+    // If a time zone hasn't been set yet because we are logging before the
+    // logging library has been initialized, we fall back to a simpler, slower
+    // method. Just report the raw Unix time in seconds. We cram this into the
+    // normal time format for the benefit of parsers.
+    auto tv = absl::ToTimeval(timestamp);
+    int snprintf_result = absl::SNPrintF(
+        buf.data(), buf.size(), "%c0000 00:00:%02d.%06d %7d ",
+        absl::LogSeverityName(severity)[0], static_cast<int>(tv.tv_sec),
+        static_cast<int>(tv.tv_usec), static_cast<int>(tid));
+    if (snprintf_result >= 0) {
+      buf.remove_prefix(static_cast<size_t>(snprintf_result));
+      return static_cast<size_t>(snprintf_result);
+    }
+    return 0;
+  }
+
+  char* p = buf.data();
+  *p++ = absl::LogSeverityName(severity)[0];
+  const absl::TimeZone::CivilInfo ci = tz->At(timestamp);
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.month()), p);
+  p += 2;
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.day()), p);
+  p += 2;
+  *p++ = ' ';
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.hour()), p);
+  p += 2;
+  *p++ = ':';
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.minute()),
+                                       p);
+  p += 2;
+  *p++ = ':';
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.second()),
+                                       p);
+  p += 2;
+  *p++ = '.';
+  const int64_t usecs = absl::ToInt64Microseconds(ci.subsecond);
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs / 10000), p);
+  p += 2;
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs / 100 % 100),
+                                       p);
+  p += 2;
+  absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs % 100), p);
+  p += 2;
+  *p++ = ' ';
+  PutLeadingWhitespace(tid, p);
+  p = absl::numbers_internal::FastIntToBuffer(tid, p);
+  *p++ = ' ';
+  const size_t bytes_formatted = static_cast<size_t>(p - buf.data());
+  buf.remove_prefix(bytes_formatted);
+  return bytes_formatted;
+}
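+
+// For example, a call to FormatBoundedFields() at 09:00:00.000000 local time
+// on Sep 26 with tid 1234567 and severity INFO writes
+//   "I0926 09:00:00.000000 1234567 "
+// and advances `buf` past those bytes (values illustrative).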
+
+size_t FormatLineNumber(int line, absl::Span<char>& buf) {
+  constexpr size_t kLineFieldMaxLen =
+      sizeof(":] ") + (1 + std::numeric_limits<int>::digits10 + 1) - sizeof("");
+  if (ABSL_PREDICT_FALSE(buf.size() < kLineFieldMaxLen)) {
+    // As above, we don't bother trying to truncate this if the buffer is too
+    // short and it should never happen.
+    buf.remove_suffix(buf.size());
+    return 0;
+  }
+  char* p = buf.data();
+  *p++ = ':';
+  p = absl::numbers_internal::FastIntToBuffer(line, p);
+  *p++ = ']';
+  *p++ = ' ';
+  const size_t bytes_formatted = static_cast<size_t>(p - buf.data());
+  buf.remove_prefix(bytes_formatted);
+  return bytes_formatted;
+}
+
+}  // namespace
+
+std::string FormatLogMessage(absl::LogSeverity severity,
+                             absl::CivilSecond civil_second,
+                             absl::Duration subsecond, log_internal::Tid tid,
+                             absl::string_view basename, int line,
+                             PrefixFormat format, absl::string_view message) {
+  return absl::StrFormat(
+      "%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] %s%s",
+      absl::LogSeverityName(severity)[0], civil_second.month(),
+      civil_second.day(), civil_second.hour(), civil_second.minute(),
+      civil_second.second(), absl::ToInt64Microseconds(subsecond), tid,
+      basename, line, format == PrefixFormat::kRaw ? "RAW: " : "", message);
+}
+
+// This method is fairly hot, and the library always passes a huge `buf`, so we
+// save some bounds-checking cycles by not trying to do precise truncation.
+// Truncating at a field boundary is probably a better UX anyway.
+//
+// The prefix is written in three parts, each of which does a single
+// bounds-check and truncation:
+// 1. severity, timestamp, and thread ID
+// 2. filename
+// 3. line number and bracket
+size_t FormatLogPrefix(absl::LogSeverity severity, absl::Time timestamp,
+                       log_internal::Tid tid, absl::string_view basename,
+                       int line, PrefixFormat format, absl::Span<char>& buf) {
+  auto prefix_size = FormatBoundedFields(severity, timestamp, tid, buf);
+  prefix_size += log_internal::AppendTruncated(basename, buf);
+  prefix_size += FormatLineNumber(line, buf);
+  if (format == PrefixFormat::kRaw)
+    prefix_size += log_internal::AppendTruncated("RAW: ", buf);
+  return prefix_size;
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/log_format.h b/abseil-cpp/absl/log/internal/log_format.h
new file mode 100644
index 0000000..95a45ed
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_format.h
@@ -0,0 +1,78 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/log_format.h
+// -----------------------------------------------------------------------------
+//
+// This file declares routines implementing formatting of log messages and the
+// log prefix.
+
+#ifndef ABSL_LOG_INTERNAL_LOG_FORMAT_H_
+#define ABSL_LOG_INTERNAL_LOG_FORMAT_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/config.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/civil_time.h"
+#include "absl/time/time.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+enum class PrefixFormat {
+  kNotRaw,
+  kRaw,
+};
+
+// Formats a log message based on the provided data.
+std::string FormatLogMessage(absl::LogSeverity severity,
+                             absl::CivilSecond civil_second,
+                             absl::Duration subsecond, log_internal::Tid tid,
+                             absl::string_view basename, int line,
+                             PrefixFormat format, absl::string_view message);
+
+// Formats various entry metadata into a text string meant for use as a
+// prefix on a log message string.  Writes into `buf`, advances `buf` to point
+// at the remainder of the buffer (i.e. past any written bytes), and returns the
+// number of bytes written.
+//
+// In addition to calling `buf->remove_prefix()` (or the equivalent), this
+// function may also do `buf->remove_suffix(buf->size())` in cases where no more
+// bytes (i.e. no message data) should be written into the buffer.  For example,
+// if the prefix ought to be:
+//   I0926 09:00:00.000000 1234567 foo.cc:123]
+// but `buf` is too small, the function might fill the whole buffer:
+//   I0926 09:00:00.000000 1234
+// (note the apparently incorrect thread ID), or it might write less:
+//   I0926 09:00:00.000000
+// In this case, it might also empty `buf` prior to returning to prevent
+// message data from being written into the space where a reader would expect to
+// see a thread ID.
+size_t FormatLogPrefix(absl::LogSeverity severity, absl::Time timestamp,
+                       log_internal::Tid tid, absl::string_view basename,
+                       int line, PrefixFormat format, absl::Span<char>& buf);
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_LOG_FORMAT_H_
diff --git a/abseil-cpp/absl/log/internal/log_impl.h b/abseil-cpp/absl/log/internal/log_impl.h
new file mode 100644
index 0000000..9326780
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_impl.h
@@ -0,0 +1,216 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_LOG_INTERNAL_LOG_IMPL_H_
+#define ABSL_LOG_INTERNAL_LOG_IMPL_H_
+
+#include "absl/log/internal/conditions.h"
+#include "absl/log/internal/log_message.h"
+#include "absl/log/internal/strip.h"
+
+// ABSL_LOG()
+#define ABSL_LOG_INTERNAL_LOG_IMPL(severity)             \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+// ABSL_PLOG()
+#define ABSL_LOG_INTERNAL_PLOG_IMPL(severity)              \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true)   \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
+          .WithPerror()
+
+// ABSL_DLOG()
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DLOG_IMPL(severity)            \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#else
+#define ABSL_LOG_INTERNAL_DLOG_IMPL(severity)             \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#endif
+
+#define ABSL_LOG_INTERNAL_LOG_IF_IMPL(severity, condition)    \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#define ABSL_LOG_INTERNAL_PLOG_IF_IMPL(severity, condition)   \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()    \
+          .WithPerror()
+
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition)   \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#else
+#define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition)              \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false && (condition)) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#endif
+
+// ABSL_LOG_EVERY_N
+#define ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(severity, n)            \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+// ABSL_LOG_FIRST_N
+#define ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(severity, n)            \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(FirstN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+// ABSL_LOG_EVERY_POW_2
+#define ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(severity)           \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryPow2) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+// ABSL_LOG_EVERY_N_SEC
+#define ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(severity, n_seconds)           \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryNSec, n_seconds) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_PLOG_EVERY_N_IMPL(severity, n)           \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()         \
+          .WithPerror()
+
+#define ABSL_LOG_INTERNAL_PLOG_FIRST_N_IMPL(severity, n)           \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(FirstN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()         \
+          .WithPerror()
+
+#define ABSL_LOG_INTERNAL_PLOG_EVERY_POW_2_IMPL(severity)          \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryPow2) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()         \
+          .WithPerror()
+
+#define ABSL_LOG_INTERNAL_PLOG_EVERY_N_SEC_IMPL(severity, n_seconds)          \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryNSec, n_seconds) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()                    \
+          .WithPerror()
+
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)       \
+  (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)       \
+  (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)        \
+  (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)                   \
+  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#else  // def NDEBUG
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)      \
+  (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)      \
+  (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)       \
+  (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)                  \
+  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#endif  // def NDEBUG
+
+#define ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(severity, condition, n)   \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_LOG_IF_FIRST_N_IMPL(severity, condition, n)   \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_LOG_IF_EVERY_POW_2_IMPL(severity, condition)  \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_SEC_IMPL(severity, condition,  \
+                                                  n_seconds)            \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
+                                                             n_seconds) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_IMPL(severity, condition, n)  \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()              \
+          .WithPerror()
+
+#define ABSL_LOG_INTERNAL_PLOG_IF_FIRST_N_IMPL(severity, condition, n)  \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()              \
+          .WithPerror()
+
+#define ABSL_LOG_INTERNAL_PLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()              \
+          .WithPerror()
+
+#define ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
+                                                   n_seconds)           \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
+                                                             n_seconds) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()              \
+          .WithPerror()
+
+#ifndef NDEBUG
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n)  \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n)  \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
+                                                   n_seconds)           \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
+                                                             n_seconds) \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#else  // def NDEBUG
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n)   \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
+      EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n)   \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
+      FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition)  \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
+      EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition,  \
+                                                   n_seconds)            \
+  ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
+      EveryNSec, n_seconds)                                              \
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+#endif  // def NDEBUG
+
+#endif  // ABSL_LOG_INTERNAL_LOG_IMPL_H_
diff --git a/abseil-cpp/absl/log/internal/log_message.cc b/abseil-cpp/absl/log/internal/log_message.cc
new file mode 100644
index 0000000..10ac245
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_message.cc
@@ -0,0 +1,633 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/log_message.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <tuple>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/strerror.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/log_severity.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/debugging/internal/examine_stack.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/append_truncated.h"
+#include "absl/log/internal/globals.h"
+#include "absl/log/internal/log_format.h"
+#include "absl/log/internal/log_sink_set.h"
+#include "absl/log/internal/proto.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+#include "absl/log/log_sink_registry.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "absl/types/span.h"
+
+extern "C" ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalOnFatalLogMessage)(const absl::LogEntry&) {
+  // Default - Do nothing
+}
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+namespace {
+// message `logging.proto.Event`
+enum EventTag : uint8_t {
+  kValue = 7,
+};
+
+// message `logging.proto.Value`
+enum ValueTag : uint8_t {
+  kString = 1,
+  kStringLiteral = 6,
+};
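+
+// For reference (illustrative note): with the standard proto wire format, a
+// length-delimited field's header byte is (field_number << 3) | 2, so
+// `EventTag::kValue` is emitted as 0x3A and `ValueTag::kString` /
+// `ValueTag::kStringLiteral` as 0x0A / 0x32.  The actual encoding is done by
+// `EncodeMessageStart` and friends from "absl/log/internal/proto.h".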
+
+// Decodes a `logging.proto.Value` from `buf` and writes a string representation
+// into `dst`.  The string representation will be truncated if `dst` is not
+// large enough to hold it.  Returns false if `dst` has size zero or one (i.e.
+// sufficient only for a nul-terminator) and no decoded data could be written.
+// This function may or may not write a nul-terminator into `dst`, and it may or
+// may not truncate the data it writes in order to make space for that nul
+// terminator.  In any case, `dst` will be advanced to point at the byte where
+// subsequent writes should begin.
+bool PrintValue(absl::Span<char>& dst, absl::Span<const char> buf) {
+  if (dst.size() <= 1) return false;
+  ProtoField field;
+  while (field.DecodeFrom(&buf)) {
+    switch (field.tag()) {
+      case ValueTag::kString:
+      case ValueTag::kStringLiteral:
+        if (field.type() == WireType::kLengthDelimited)
+          if (log_internal::AppendTruncated(field.string_value(), dst) <
+              field.string_value().size())
+            return false;
+    }
+  }
+  return true;
+}
+
+absl::string_view Basename(absl::string_view filepath) {
+#ifdef _WIN32
+  size_t path = filepath.find_last_of("/\\");
+#else
+  size_t path = filepath.find_last_of('/');
+#endif
+  if (path != filepath.npos) filepath.remove_prefix(path + 1);
+  return filepath;
+}
+
+void WriteToString(const char* data, void* str) {
+  reinterpret_cast<std::string*>(str)->append(data);
+}
+void WriteToStream(const char* data, void* os) {
+  auto* cast_os = static_cast<std::ostream*>(os);
+  *cast_os << data;
+}
+}  // namespace
+
+struct LogMessage::LogMessageData final {
+  LogMessageData(const char* file, int line, absl::LogSeverity severity,
+                 absl::Time timestamp);
+  LogMessageData(const LogMessageData&) = delete;
+  LogMessageData& operator=(const LogMessageData&) = delete;
+
+  // `LogEntry` sent to `LogSink`s; contains metadata.
+  absl::LogEntry entry;
+
+  // true => this was first fatal msg
+  bool first_fatal;
+  // true => all failures should be quiet
+  bool fail_quietly;
+  // true => PLOG was requested
+  bool is_perror;
+
+  // Extra `LogSink`s to log to, in addition to `global_sinks`.
+  absl::InlinedVector<absl::LogSink*, 16> extra_sinks;
+  // If true, log to `extra_sinks` but not to `global_sinks` or hardcoded
+  // non-sink targets (e.g. stderr, log files).
+  bool extra_sinks_only;
+
+  std::ostream manipulated;  // ostream with IO manipulators applied
+
+  // A `logging.proto.Event` proto message is built into `encoded_buf`.
+  std::array<char, kLogMessageBufferSize> encoded_buf;
+  // `encoded_remaining` is the suffix of `encoded_buf` that has not been filled
+  // yet.  If a datum to be encoded does not fit into `encoded_remaining` and
+  // cannot be truncated to fit, the size of `encoded_remaining` will be zeroed
+  // to prevent encoding of any further data.  Note that in this case its data()
+  // pointer will not point past the end of `encoded_buf`.
+  absl::Span<char> encoded_remaining;
+
+  // A formatted string message is built in `string_buf`.
+  std::array<char, kLogMessageBufferSize> string_buf;
+
+  void FinalizeEncodingAndFormat();
+};
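+
+// Worked note on the invariant above (illustrative): when a datum cannot be
+// encoded even in truncated form, `encoded_remaining.remove_suffix(...)`
+// zeroes its size while leaving data() in place, so later encodes become
+// no-ops and `FinalizeEncodingAndFormat` can still compute the bytes already
+// written as `encoded_remaining.data() - encoded_buf.data()`.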
+
+LogMessage::LogMessageData::LogMessageData(const char* file, int line,
+                                           absl::LogSeverity severity,
+                                           absl::Time timestamp)
+    : extra_sinks_only(false),
+      manipulated(nullptr),
+      // This `absl::MakeSpan` silences spurious -Wuninitialized from GCC:
+      encoded_remaining(absl::MakeSpan(encoded_buf)) {
+  // Legacy defaults for LOG's ostream:
+  manipulated.setf(std::ios_base::showbase | std::ios_base::boolalpha);
+  entry.full_filename_ = file;
+  entry.base_filename_ = Basename(file);
+  entry.line_ = line;
+  entry.prefix_ = absl::ShouldPrependLogPrefix();
+  entry.severity_ = absl::NormalizeLogSeverity(severity);
+  entry.verbose_level_ = absl::LogEntry::kNoVerbosityLevel;
+  entry.timestamp_ = timestamp;
+  entry.tid_ = absl::base_internal::GetCachedTID();
+}
+
+void LogMessage::LogMessageData::FinalizeEncodingAndFormat() {
+  // Note that `encoded_remaining` may have zero size without pointing past the
+  // end of `encoded_buf`, so the difference between `data()` pointers is used
+  // to compute the size of `encoded_data`.
+  absl::Span<const char> encoded_data(
+      encoded_buf.data(),
+      static_cast<size_t>(encoded_remaining.data() - encoded_buf.data()));
+  // `string_remaining` is the suffix of `string_buf` that has not been filled
+  // yet.
+  absl::Span<char> string_remaining(string_buf);
+  // We may need to write a newline and nul-terminator at the end of the decoded
+  // string data.  Rather than worry about whether those should overwrite the
+  // end of the string (if the buffer is full) or be appended, we avoid writing
+  // into the last two bytes so we always have space to append.
+  string_remaining.remove_suffix(2);
+  entry.prefix_len_ =
+      entry.prefix() ? log_internal::FormatLogPrefix(
+                           entry.log_severity(), entry.timestamp(), entry.tid(),
+                           entry.source_basename(), entry.source_line(),
+                           log_internal::ThreadIsLoggingToLogSink()
+                               ? PrefixFormat::kRaw
+                               : PrefixFormat::kNotRaw,
+                           string_remaining)
+                     : 0;
+  // Decode data from `encoded_buf` until we run out of data or we run out of
+  // `string_remaining`.
+  ProtoField field;
+  while (field.DecodeFrom(&encoded_data)) {
+    switch (field.tag()) {
+      case EventTag::kValue:
+        if (field.type() != WireType::kLengthDelimited) continue;
+        if (PrintValue(string_remaining, field.bytes_value())) continue;
+        break;
+    }
+    break;
+  }
+  auto chars_written =
+      static_cast<size_t>(string_remaining.data() - string_buf.data());
+  string_buf[chars_written++] = '\n';
+  string_buf[chars_written++] = '\0';
+  entry.text_message_with_prefix_and_newline_and_nul_ =
+      absl::MakeSpan(string_buf).subspan(0, chars_written);
+}
+
+LogMessage::LogMessage(const char* file, int line, absl::LogSeverity severity)
+    : data_(absl::make_unique<LogMessageData>(file, line, severity,
+                                              absl::Now())) {
+  data_->first_fatal = false;
+  data_->is_perror = false;
+  data_->fail_quietly = false;
+
+  // This logs a backtrace even if the location is subsequently changed using
+  // AtLocation.  This quirk, and the behavior when AtLocation is called twice,
+  // are fixable but probably not worth fixing.
+  LogBacktraceIfNeeded();
+}
+
+LogMessage::LogMessage(const char* file, int line, InfoTag)
+    : LogMessage(file, line, absl::LogSeverity::kInfo) {}
+LogMessage::LogMessage(const char* file, int line, WarningTag)
+    : LogMessage(file, line, absl::LogSeverity::kWarning) {}
+LogMessage::LogMessage(const char* file, int line, ErrorTag)
+    : LogMessage(file, line, absl::LogSeverity::kError) {}
+
+LogMessage::~LogMessage() {
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (data_->entry.log_severity() <
+          static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+      data_->entry.log_severity() < absl::LogSeverity::kFatal) {
+    return;
+  }
+#endif
+  Flush();
+}
+
+LogMessage& LogMessage::AtLocation(absl::string_view file, int line) {
+  data_->entry.full_filename_ = file;
+  data_->entry.base_filename_ = Basename(file);
+  data_->entry.line_ = line;
+  LogBacktraceIfNeeded();
+  return *this;
+}
+
+LogMessage& LogMessage::NoPrefix() {
+  data_->entry.prefix_ = false;
+  return *this;
+}
+
+LogMessage& LogMessage::WithVerbosity(int verbose_level) {
+  if (verbose_level == absl::LogEntry::kNoVerbosityLevel) {
+    data_->entry.verbose_level_ = absl::LogEntry::kNoVerbosityLevel;
+  } else {
+    data_->entry.verbose_level_ = std::max(0, verbose_level);
+  }
+  return *this;
+}
+
+LogMessage& LogMessage::WithTimestamp(absl::Time timestamp) {
+  data_->entry.timestamp_ = timestamp;
+  return *this;
+}
+
+LogMessage& LogMessage::WithThreadID(absl::LogEntry::tid_t tid) {
+  data_->entry.tid_ = tid;
+  return *this;
+}
+
+LogMessage& LogMessage::WithMetadataFrom(const absl::LogEntry& entry) {
+  data_->entry.full_filename_ = entry.full_filename_;
+  data_->entry.base_filename_ = entry.base_filename_;
+  data_->entry.line_ = entry.line_;
+  data_->entry.prefix_ = entry.prefix_;
+  data_->entry.severity_ = entry.severity_;
+  data_->entry.verbose_level_ = entry.verbose_level_;
+  data_->entry.timestamp_ = entry.timestamp_;
+  data_->entry.tid_ = entry.tid_;
+  return *this;
+}
+
+LogMessage& LogMessage::WithPerror() {
+  data_->is_perror = true;
+  return *this;
+}
+
+LogMessage& LogMessage::ToSinkAlso(absl::LogSink* sink) {
+  ABSL_INTERNAL_CHECK(sink, "null LogSink*");
+  data_->extra_sinks.push_back(sink);
+  return *this;
+}
+
+LogMessage& LogMessage::ToSinkOnly(absl::LogSink* sink) {
+  ABSL_INTERNAL_CHECK(sink, "null LogSink*");
+  data_->extra_sinks.clear();
+  data_->extra_sinks.push_back(sink);
+  data_->extra_sinks_only = true;
+  return *this;
+}
+
+#ifdef __ELF__
+extern "C" void __gcov_dump() ABSL_ATTRIBUTE_WEAK;
+extern "C" void __gcov_flush() ABSL_ATTRIBUTE_WEAK;
+#endif
+
+void LogMessage::FailWithoutStackTrace() {
+  // Now suppress repeated trace logging:
+  log_internal::SetSuppressSigabortTrace(true);
+#if defined _DEBUG && defined COMPILER_MSVC
+  // When debugging on windows, avoid the obnoxious dialog.
+  __debugbreak();
+#endif
+
+#ifdef __ELF__
+  // For b/8737634, flush coverage if we are in coverage mode.
+  if (&__gcov_dump != nullptr) {
+    __gcov_dump();
+  } else if (&__gcov_flush != nullptr) {
+    __gcov_flush();
+  }
+#endif
+
+  abort();
+}
+
+void LogMessage::FailQuietly() {
+  // _exit. Calling abort() would trigger all sorts of death signal handlers
+  // and a detailed stack trace. Calling exit() would trigger the onexit
+  // handlers, including the heap-leak checker, which is guaranteed to fail in
+  // this case: we probably just new'ed the std::string that we logged.
+  // Anyway, if you're calling Fail or FailQuietly, you're trying to bail out
+  // of the program quickly, and it doesn't make much sense for FailQuietly to
+  // offer different guarantees about exit behavior than Fail does. (And as a
+  // consequence for QCHECK and CHECK to offer different exit behaviors)
+  _exit(1);
+}
+
+LogMessage& LogMessage::operator<<(const std::string& v) {
+  CopyToEncodedBuffer<StringType::kNotLiteral>(v);
+  return *this;
+}
+
+LogMessage& LogMessage::operator<<(absl::string_view v) {
+  CopyToEncodedBuffer<StringType::kNotLiteral>(v);
+  return *this;
+}
+LogMessage& LogMessage::operator<<(std::ostream& (*m)(std::ostream& os)) {
+  OstreamView view(*data_);
+  data_->manipulated << m;
+  return *this;
+}
+LogMessage& LogMessage::operator<<(std::ios_base& (*m)(std::ios_base& os)) {
+  OstreamView view(*data_);
+  data_->manipulated << m;
+  return *this;
+}
+template LogMessage& LogMessage::operator<<(const char& v);
+template LogMessage& LogMessage::operator<<(const signed char& v);
+template LogMessage& LogMessage::operator<<(const unsigned char& v);
+template LogMessage& LogMessage::operator<<(const short& v);           // NOLINT
+template LogMessage& LogMessage::operator<<(const unsigned short& v);  // NOLINT
+template LogMessage& LogMessage::operator<<(const int& v);
+template LogMessage& LogMessage::operator<<(const unsigned int& v);
+template LogMessage& LogMessage::operator<<(const long& v);           // NOLINT
+template LogMessage& LogMessage::operator<<(const unsigned long& v);  // NOLINT
+template LogMessage& LogMessage::operator<<(const long long& v);      // NOLINT
+template LogMessage& LogMessage::operator<<(
+    const unsigned long long& v);  // NOLINT
+template LogMessage& LogMessage::operator<<(void* const& v);
+template LogMessage& LogMessage::operator<<(const void* const& v);
+template LogMessage& LogMessage::operator<<(const float& v);
+template LogMessage& LogMessage::operator<<(const double& v);
+template LogMessage& LogMessage::operator<<(const bool& v);
+
+void LogMessage::Flush() {
+  if (data_->entry.log_severity() < absl::MinLogLevel()) return;
+
+  if (data_->is_perror) {
+    InternalStream() << ": " << absl::base_internal::StrError(errno_saver_())
+                     << " [" << errno_saver_() << "]";
+  }
+
+  // Have we already seen a fatal message?
+  ABSL_CONST_INIT static std::atomic<bool> seen_fatal(false);
+  if (data_->entry.log_severity() == absl::LogSeverity::kFatal &&
+      absl::log_internal::ExitOnDFatal()) {
+    // Exactly one LOG(FATAL) message is responsible for aborting the process,
+    // even if multiple threads LOG(FATAL) concurrently.
+    bool expected_seen_fatal = false;
+    if (seen_fatal.compare_exchange_strong(expected_seen_fatal, true,
+                                           std::memory_order_relaxed)) {
+      data_->first_fatal = true;
+    }
+  }
+
+  data_->FinalizeEncodingAndFormat();
+  data_->entry.encoding_ =
+      absl::string_view(data_->encoded_buf.data(),
+                        static_cast<size_t>(data_->encoded_remaining.data() -
+                                            data_->encoded_buf.data()));
+  SendToLog();
+}
+
+void LogMessage::SetFailQuietly() { data_->fail_quietly = true; }
+
+LogMessage::OstreamView::OstreamView(LogMessageData& message_data)
+    : data_(message_data), encoded_remaining_copy_(data_.encoded_remaining) {
+  // This constructor sets the `streambuf` up so that streaming into an attached
+  // ostream encodes string data in-place.  To do that, we write appropriate
+  // headers into the buffer using a copy of the buffer view so that we can
+  // decide not to keep them later if nothing is ever streamed in.  We don't
+  // know how much data we'll get, but we can use the size of the remaining
+  // buffer as an upper bound and fill in the right size once we know it.
+  message_start_ =
+      EncodeMessageStart(EventTag::kValue, encoded_remaining_copy_.size(),
+                         &encoded_remaining_copy_);
+  string_start_ =
+      EncodeMessageStart(ValueTag::kString, encoded_remaining_copy_.size(),
+                         &encoded_remaining_copy_);
+  setp(encoded_remaining_copy_.data(),
+       encoded_remaining_copy_.data() + encoded_remaining_copy_.size());
+  data_.manipulated.rdbuf(this);
+}
+
+LogMessage::OstreamView::~OstreamView() {
+  data_.manipulated.rdbuf(nullptr);
+  if (!string_start_.data()) {
+    // The second field header didn't fit.  Whether the first one did or not, we
+    // shouldn't commit `encoded_remaining_copy_`, and we also need to zero the
+    // size of `data_.encoded_remaining` so that no more data are encoded.
+    data_.encoded_remaining.remove_suffix(data_.encoded_remaining.size());
+    return;
+  }
+  const absl::Span<const char> contents(pbase(),
+                                        static_cast<size_t>(pptr() - pbase()));
+  if (contents.empty()) return;
+  encoded_remaining_copy_.remove_prefix(contents.size());
+  EncodeMessageLength(string_start_, &encoded_remaining_copy_);
+  EncodeMessageLength(message_start_, &encoded_remaining_copy_);
+  data_.encoded_remaining = encoded_remaining_copy_;
+}
+
+std::ostream& LogMessage::OstreamView::stream() { return data_.manipulated; }
+
+bool LogMessage::IsFatal() const {
+  return data_->entry.log_severity() == absl::LogSeverity::kFatal &&
+         absl::log_internal::ExitOnDFatal();
+}
+
+void LogMessage::PrepareToDie() {
+  // If we log a FATAL message, flush all the log destinations, then toss
+  // a signal for others to catch. We leave the logs in a state that
+  // someone else can use them (as long as they flush afterwards)
+  if (data_->first_fatal) {
+    // Notify observers about the upcoming fatal error.
+    ABSL_INTERNAL_C_SYMBOL(AbslInternalOnFatalLogMessage)(data_->entry);
+  }
+
+  if (!data_->fail_quietly) {
+    // Log the message first before we start collecting stack trace.
+    log_internal::LogToSinks(data_->entry, absl::MakeSpan(data_->extra_sinks),
+                             data_->extra_sinks_only);
+
+    // `DumpStackTrace` generates an empty string under MSVC.
+    // Adding the constant prefix here simplifies testing.
+    data_->entry.stacktrace_ = "*** Check failure stack trace: ***\n";
+    debugging_internal::DumpStackTrace(
+        0, log_internal::MaxFramesInLogStackTrace(),
+        log_internal::ShouldSymbolizeLogStackTrace(), WriteToString,
+        &data_->entry.stacktrace_);
+  }
+}
+
+void LogMessage::Die() {
+  absl::FlushLogSinks();
+
+  if (data_->fail_quietly) {
+    FailQuietly();
+  } else {
+    FailWithoutStackTrace();
+  }
+}
+
+void LogMessage::SendToLog() {
+  if (IsFatal()) PrepareToDie();
+  // Also log to all registered sinks, even if OnlyLogToStderr() is set.
+  log_internal::LogToSinks(data_->entry, absl::MakeSpan(data_->extra_sinks),
+                           data_->extra_sinks_only);
+  if (IsFatal()) Die();
+}
+
+void LogMessage::LogBacktraceIfNeeded() {
+  if (!absl::log_internal::IsInitialized()) return;
+
+  if (!absl::log_internal::ShouldLogBacktraceAt(data_->entry.source_basename(),
+                                                data_->entry.source_line()))
+    return;
+  OstreamView view(*data_);
+  view.stream() << " (stacktrace:\n";
+  debugging_internal::DumpStackTrace(
+      1, log_internal::MaxFramesInLogStackTrace(),
+      log_internal::ShouldSymbolizeLogStackTrace(), WriteToStream,
+      &view.stream());
+  view.stream() << ") ";
+}
+
+// Encodes into `data_->encoded_remaining` a partial `logging.proto.Event`
+// containing the specified string data using a `Value` field appropriate to
+// `str_type`.  Truncates `str` if necessary, but emits nothing and marks the
+// buffer full if even the field headers do not fit.
+template <LogMessage::StringType str_type>
+void LogMessage::CopyToEncodedBuffer(absl::string_view str) {
+  auto encoded_remaining_copy = data_->encoded_remaining;
+  auto start = EncodeMessageStart(
+      EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + str.size(),
+      &encoded_remaining_copy);
+  // If the `logging.proto.Event.value` field header did not fit,
+  // `EncodeMessageStart` will have zeroed `encoded_remaining_copy`'s size and
+  // `EncodeStringTruncate` will fail too.
+  if (EncodeStringTruncate(str_type == StringType::kLiteral
+                               ? ValueTag::kStringLiteral
+                               : ValueTag::kString,
+                           str, &encoded_remaining_copy)) {
+    // The string may have been truncated, but the field header fit.
+    EncodeMessageLength(start, &encoded_remaining_copy);
+    data_->encoded_remaining = encoded_remaining_copy;
+  } else {
+    // The field header(s) did not fit; zero `encoded_remaining` so we don't
+    // write anything else later.
+    data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
+  }
+}
+template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
+    absl::string_view str);
+template void LogMessage::CopyToEncodedBuffer<
+    LogMessage::StringType::kNotLiteral>(absl::string_view str);
+template <LogMessage::StringType str_type>
+void LogMessage::CopyToEncodedBuffer(char ch, size_t num) {
+  auto encoded_remaining_copy = data_->encoded_remaining;
+  auto value_start = EncodeMessageStart(
+      EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + num,
+      &encoded_remaining_copy);
+  auto str_start = EncodeMessageStart(str_type == StringType::kLiteral
+                                          ? ValueTag::kStringLiteral
+                                          : ValueTag::kString,
+                                      num, &encoded_remaining_copy);
+  if (str_start.data()) {
+    // The field headers fit.
+    log_internal::AppendTruncated(ch, num, encoded_remaining_copy);
+    EncodeMessageLength(str_start, &encoded_remaining_copy);
+    EncodeMessageLength(value_start, &encoded_remaining_copy);
+    data_->encoded_remaining = encoded_remaining_copy;
+  } else {
+    // The field header(s) did not fit; zero `encoded_remaining` so we don't
+    // write anything else later.
+    data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
+  }
+}
+template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
+    char ch, size_t num);
+template void LogMessage::CopyToEncodedBuffer<
+    LogMessage::StringType::kNotLiteral>(char ch, size_t num);
+
+LogMessageFatal::LogMessageFatal(const char* file, int line)
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {}
+
+LogMessageFatal::LogMessageFatal(const char* file, int line,
+                                 absl::string_view failure_msg)
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {
+  *this << "Check failed: " << failure_msg << " ";
+}
+
+// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
+// disable msvc's warning about the d'tor never returning.
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
+LogMessageFatal::~LogMessageFatal() {
+  Flush();
+  FailWithoutStackTrace();
+}
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(pop)
+#endif
+
+LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line)
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {
+  SetFailQuietly();
+}
+
+LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line,
+                                               absl::string_view failure_msg)
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {
+  SetFailQuietly();
+  *this << "Check failed: " << failure_msg << " ";
+}
+
+// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
+// disable msvc's warning about the d'tor never returning.
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
+LogMessageQuietlyFatal::~LogMessageQuietlyFatal() {
+  Flush();
+  FailQuietly();
+}
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(pop)
+#endif
+
+}  // namespace log_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/log_message.h b/abseil-cpp/absl/log/internal/log_message.h
new file mode 100644
index 0000000..4693772
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_message.h
@@ -0,0 +1,377 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/log_message.h
+// -----------------------------------------------------------------------------
+//
+// This file declares `class absl::log_internal::LogMessage`. This class more or
+// less represents a particular log message. LOG/CHECK macros create a
+// temporary instance of `LogMessage` and then stream values to it.  At the end
+// of the LOG/CHECK statement, LogMessage instance goes out of scope and
+// `~LogMessage` directs the message to the registered log sinks.
+// Heap-allocation of `LogMessage` is unsupported.  Construction outside of a
+// `LOG` macro is unsupported.
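+//
+// Roughly (an illustrative sketch; see log.h and log/internal/log_impl.h for
+// the real macro plumbing), a statement like
+//   LOG(INFO) << "hello " << 42;
+// boils down to something of the form
+//   LogMessage(__FILE__, __LINE__, LogMessage::InfoTag{}).InternalStream()
+//       << "hello " << 42;
+// with `~LogMessage` at the end of the full expression doing the dispatch.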
+
+#ifndef ABSL_LOG_INTERNAL_LOG_MESSAGE_H_
+#define ABSL_LOG_INTERNAL_LOG_MESSAGE_H_
+
+#include <ios>
+#include <memory>
+#include <ostream>
+#include <streambuf>
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/nullguard.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+#include "absl/strings/internal/has_absl_stringify.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+constexpr int kLogMessageBufferSize = 15000;
+
+class LogMessage {
+ public:
+  struct InfoTag {};
+  struct WarningTag {};
+  struct ErrorTag {};
+
+  // Used for `LOG`.
+  LogMessage(const char* file, int line,
+             absl::LogSeverity severity) ABSL_ATTRIBUTE_COLD;
+  // These constructors are slightly smaller/faster to call; the severity is
+  // curried into the function pointer.
+  LogMessage(const char* file, int line,
+             InfoTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
+  LogMessage(const char* file, int line,
+             WarningTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
+  LogMessage(const char* file, int line,
+             ErrorTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
+  LogMessage(const LogMessage&) = delete;
+  LogMessage& operator=(const LogMessage&) = delete;
+  ~LogMessage() ABSL_ATTRIBUTE_COLD;
+
+  // Overrides the location inferred from the callsite.  The string pointed to
+  // by `file` must be valid until the end of the statement.
+  LogMessage& AtLocation(absl::string_view file, int line);
+  // Omits the prefix from this line.  The prefix includes metadata about the
+  // logged data such as source code location and timestamp.
+  LogMessage& NoPrefix();
+  // Sets the verbosity field of the logged message as if it was logged by
+  // `VLOG(verbose_level)`.  Unlike `VLOG`, this method does not affect
+  // evaluation of the statement when the specified `verbose_level` has been
+  // disabled.  The only effect is on `absl::LogSink` implementations which
+  // make use of the `absl::LogSink::verbosity()` value.  The value
+  // `absl::LogEntry::kNoVerbosityLevel` can be specified to mark the message
+  // not verbose.
+  LogMessage& WithVerbosity(int verbose_level);
+  // Uses the specified timestamp instead of one collected in the constructor.
+  LogMessage& WithTimestamp(absl::Time timestamp);
+  // Uses the specified thread ID instead of one collected in the constructor.
+  LogMessage& WithThreadID(absl::LogEntry::tid_t tid);
+  // Copies all metadata (but no data) from the specified `absl::LogEntry`.
+  LogMessage& WithMetadataFrom(const absl::LogEntry& entry);
+  // Appends to the logged message a colon, a space, a textual description of
+  // the current value of `errno` (as by strerror(3)), and the numerical value
+  // of `errno`.
+  LogMessage& WithPerror();
+  // Sends this message to `*sink` in addition to whatever other sinks it would
+  // otherwise have been sent to.  `sink` must not be null.
+  LogMessage& ToSinkAlso(absl::LogSink* sink);
+  // Sends this message to `*sink` and no others.  `sink` must not be null.
+  LogMessage& ToSinkOnly(absl::LogSink* sink);
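+
+  // Example (illustrative; `my_sink` is a hypothetical `absl::LogSink`):
+  //   LOG(WARNING).WithPerror().ToSinkAlso(&my_sink) << "open() failed";
+  // Each mutator returns *this, so calls can be chained ahead of the streamed
+  // message.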
+
+  // Don't call this method from outside this library.
+  LogMessage& InternalStream() { return *this; }
+
+  // By-value overloads for small, common types let us overlook common failures
+  // to define globals and static data members (i.e. in a .cc file).
+  // clang-format off
+  // The CUDA toolchain cannot handle these <<<'s:
+  LogMessage& operator<<(char v) { return operator<< <char>(v); }
+  LogMessage& operator<<(signed char v) { return operator<< <signed char>(v); }
+  LogMessage& operator<<(unsigned char v) {
+    return operator<< <unsigned char>(v);
+  }
+  LogMessage& operator<<(signed short v) {  // NOLINT
+    return operator<< <signed short>(v);  // NOLINT
+  }
+  LogMessage& operator<<(signed int v) { return operator<< <signed int>(v); }
+  LogMessage& operator<<(signed long v) {  // NOLINT
+    return operator<< <signed long>(v);  // NOLINT
+  }
+  LogMessage& operator<<(signed long long v) {  // NOLINT
+    return operator<< <signed long long>(v);  // NOLINT
+  }
+  LogMessage& operator<<(unsigned short v) {  // NOLINT
+    return operator<< <unsigned short>(v);  // NOLINT
+  }
+  LogMessage& operator<<(unsigned int v) {
+    return operator<< <unsigned int>(v);
+  }
+  LogMessage& operator<<(unsigned long v) {  // NOLINT
+    return operator<< <unsigned long>(v);  // NOLINT
+  }
+  LogMessage& operator<<(unsigned long long v) {  // NOLINT
+    return operator<< <unsigned long long>(v);  // NOLINT
+  }
+  LogMessage& operator<<(void* v) { return operator<< <void*>(v); }
+  LogMessage& operator<<(const void* v) { return operator<< <const void*>(v); }
+  LogMessage& operator<<(float v) { return operator<< <float>(v); }
+  LogMessage& operator<<(double v) { return operator<< <double>(v); }
+  LogMessage& operator<<(bool v) { return operator<< <bool>(v); }
+  // clang-format on
+
+  // These overloads are more efficient since no `ostream` is involved.
+  LogMessage& operator<<(const std::string& v);
+  LogMessage& operator<<(absl::string_view v);
+
+  // Handle stream manipulators e.g. std::endl.
+  LogMessage& operator<<(std::ostream& (*m)(std::ostream& os));
+  LogMessage& operator<<(std::ios_base& (*m)(std::ios_base& os));
+
+  // Literal strings.  This allows us to record C string literals as literals in
+  // the logging.proto.Value.
+  //
+  // Allow this overload to be inlined to prevent generating instantiations of
+  // this template for every value of `SIZE` encountered in each source code
+  // file. That significantly increases linker input sizes. Inlining is cheap
+  // because the argument to this overload is almost always a string literal so
+  // the call to `strlen` can be replaced at compile time. The overload for
+  // `char[]` below should not be inlined. The compiler typically does not have
+  // the string at compile time and cannot replace the call to `strlen` so
+  // inlining it increases the binary size. See the discussion on
+  // cl/107527369.
+  template <int SIZE>
+  LogMessage& operator<<(const char (&buf)[SIZE]);
+
+  // This prevents non-const `char[]` arrays from looking like literals.
+  template <int SIZE>
+  LogMessage& operator<<(char (&buf)[SIZE]) ABSL_ATTRIBUTE_NOINLINE;
+
+  // Types that support `AbslStringify()` are serialized that way.
+  template <typename T,
+            typename std::enable_if<
+                strings_internal::HasAbslStringify<T>::value, int>::type = 0>
+  LogMessage& operator<<(const T& v) ABSL_ATTRIBUTE_NOINLINE;
+
+  // Types that don't support `AbslStringify()` but do support streaming into a
+  // `std::ostream&` are serialized that way.
+  template <typename T,
+            typename std::enable_if<
+                !strings_internal::HasAbslStringify<T>::value, int>::type = 0>
+  LogMessage& operator<<(const T& v) ABSL_ATTRIBUTE_NOINLINE;
+
+  // Note: We explicitly do not support `operator<<` for non-const references
+  // because it breaks logging of non-integer bitfield types (i.e., enums).
+
+ protected:
+  // Call `abort()` or similar to perform `LOG(FATAL)` crash.  It is assumed
+  // that the caller has already generated and written the trace as appropriate.
+  ABSL_ATTRIBUTE_NORETURN static void FailWithoutStackTrace();
+
+  // Similar to `FailWithoutStackTrace()`, but without `abort()`.  Terminates
+  // the process with an error exit code.
+  ABSL_ATTRIBUTE_NORETURN static void FailQuietly();
+
+  // Dispatches the completed `absl::LogEntry` to applicable `absl::LogSink`s.
+  // This might as well be inlined into `~LogMessage` except that
+  // `~LogMessageFatal` needs to call it early.
+  void Flush();
+
+  // After this is called, failures are done as quietly as possible for this
+  // log message.
+  void SetFailQuietly();
+
+ private:
+  struct LogMessageData;  // Opaque type containing message state
+  friend class AsLiteralImpl;
+  friend class StringifySink;
+
+  // This streambuf writes directly into the structured logging buffer so that
+  // arbitrary types can be encoded as string data (using
+  // `operator<<(std::ostream&, ...)`) without any extra allocation or copying.
+  // Space is reserved before the data to store the length field, which is
+  // filled in by `~OstreamView`.
+  class OstreamView final : public std::streambuf {
+   public:
+    explicit OstreamView(LogMessageData& message_data);
+    ~OstreamView() override;
+    OstreamView(const OstreamView&) = delete;
+    OstreamView& operator=(const OstreamView&) = delete;
+    std::ostream& stream();
+
+   private:
+    LogMessageData& data_;
+    absl::Span<char> encoded_remaining_copy_;
+    absl::Span<char> message_start_;
+    absl::Span<char> string_start_;
+  };
+
+  enum class StringType {
+    kLiteral,
+    kNotLiteral,
+  };
+  template <StringType str_type>
+  void CopyToEncodedBuffer(absl::string_view str) ABSL_ATTRIBUTE_NOINLINE;
+  template <StringType str_type>
+  void CopyToEncodedBuffer(char ch, size_t num) ABSL_ATTRIBUTE_NOINLINE;
+
+  // Returns `true` if the message is fatal or enabled debug-fatal.
+  bool IsFatal() const;
+
+  // Records some tombstone-type data in anticipation of `Die`.
+  void PrepareToDie();
+  void Die();
+
+  void SendToLog();
+
+  // Checks `FLAGS_log_backtrace_at` and appends a backtrace if appropriate.
+  void LogBacktraceIfNeeded();
+
+  // This should be the first data member so that its initializer captures errno
+  // before any other initializers alter it (e.g. with calls to new) and so that
+  // no other destructors run afterward and alter it (e.g. with calls to delete).
+  absl::base_internal::ErrnoSaver errno_saver_;
+
+  // We keep the data in a separate struct so that each instance of `LogMessage`
+  // uses less stack space.
+  std::unique_ptr<LogMessageData> data_;
+};
+
+// Helper class so that `AbslStringify()` can modify the LogMessage.
+class StringifySink final {
+ public:
+  explicit StringifySink(LogMessage& message) : message_(message) {}
+
+  void Append(size_t count, char ch) {
+    message_.CopyToEncodedBuffer<LogMessage::StringType::kNotLiteral>(ch,
+                                                                      count);
+  }
+
+  void Append(absl::string_view v) {
+    message_.CopyToEncodedBuffer<LogMessage::StringType::kNotLiteral>(v);
+  }
+
+  // For types that implement `AbslStringify` using `absl::Format()`.
+  friend void AbslFormatFlush(StringifySink* sink, absl::string_view v) {
+    sink->Append(v);
+  }
+
+ private:
+  LogMessage& message_;
+};
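+
+// Illustrative sketch of how `StringifySink` is reached (`Point` below is a
+// hypothetical user type, not part of this library):
+//
+//   struct Point {
+//     int x, y;
+//     template <typename Sink>
+//     friend void AbslStringify(Sink& sink, const Point& p) {
+//       absl::Format(&sink, "(%d, %d)", p.x, p.y);
+//     }
+//   };
+//
+//   LOG(INFO) << Point{1, 2};  // Streams "(1, 2)" via StringifySink.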
+
+// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE`
+template <typename T,
+          typename std::enable_if<strings_internal::HasAbslStringify<T>::value,
+                                  int>::type>
+LogMessage& LogMessage::operator<<(const T& v) {
+  StringifySink sink(*this);
+  // Replace with public API.
+  AbslStringify(sink, v);
+  return *this;
+}
+
+// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE`
+template <typename T,
+          typename std::enable_if<!strings_internal::HasAbslStringify<T>::value,
+                                  int>::type>
+LogMessage& LogMessage::operator<<(const T& v) {
+  OstreamView view(*data_);
+  view.stream() << log_internal::NullGuard<T>().Guard(v);
+  return *this;
+}
+
+template <int SIZE>
+LogMessage& LogMessage::operator<<(const char (&buf)[SIZE]) {
+  CopyToEncodedBuffer<StringType::kLiteral>(buf);
+  return *this;
+}
+
+// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE`
+template <int SIZE>
+LogMessage& LogMessage::operator<<(char (&buf)[SIZE]) {
+  CopyToEncodedBuffer<StringType::kNotLiteral>(buf);
+  return *this;
+}
+// We instantiate these specializations in the library's TU to save space in
+// other TUs.  Since the template is marked `ABSL_ATTRIBUTE_NOINLINE` we will be
+// emitting a function call either way.
+extern template LogMessage& LogMessage::operator<<(const char& v);
+extern template LogMessage& LogMessage::operator<<(const signed char& v);
+extern template LogMessage& LogMessage::operator<<(const unsigned char& v);
+extern template LogMessage& LogMessage::operator<<(const short& v);  // NOLINT
+extern template LogMessage& LogMessage::operator<<(
+    const unsigned short& v);  // NOLINT
+extern template LogMessage& LogMessage::operator<<(const int& v);
+extern template LogMessage& LogMessage::operator<<(
+    const unsigned int& v);                                         // NOLINT
+extern template LogMessage& LogMessage::operator<<(const long& v);  // NOLINT
+extern template LogMessage& LogMessage::operator<<(
+    const unsigned long& v);  // NOLINT
+extern template LogMessage& LogMessage::operator<<(
+    const long long& v);  // NOLINT
+extern template LogMessage& LogMessage::operator<<(
+    const unsigned long long& v);  // NOLINT
+extern template LogMessage& LogMessage::operator<<(void* const& v);
+extern template LogMessage& LogMessage::operator<<(const void* const& v);
+extern template LogMessage& LogMessage::operator<<(const float& v);
+extern template LogMessage& LogMessage::operator<<(const double& v);
+extern template LogMessage& LogMessage::operator<<(const bool& v);
+
+extern template void LogMessage::CopyToEncodedBuffer<
+    LogMessage::StringType::kLiteral>(absl::string_view str);
+extern template void LogMessage::CopyToEncodedBuffer<
+    LogMessage::StringType::kNotLiteral>(absl::string_view str);
+extern template void
+LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(char ch,
+                                                                  size_t num);
+extern template void LogMessage::CopyToEncodedBuffer<
+    LogMessage::StringType::kNotLiteral>(char ch, size_t num);
+
+// `LogMessageFatal` ensures the process will exit in failure after logging this
+// message.
+class LogMessageFatal final : public LogMessage {
+ public:
+  LogMessageFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
+  LogMessageFatal(const char* file, int line,
+                  absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD;
+  ABSL_ATTRIBUTE_NORETURN ~LogMessageFatal();
+};
+
+class LogMessageQuietlyFatal final : public LogMessage {
+ public:
+  LogMessageQuietlyFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
+  LogMessageQuietlyFatal(const char* file, int line,
+                         absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD;
+  ABSL_ATTRIBUTE_NORETURN ~LogMessageQuietlyFatal();
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+extern "C" ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalOnFatalLogMessage)(const absl::LogEntry&);
+
+#endif  // ABSL_LOG_INTERNAL_LOG_MESSAGE_H_
diff --git a/abseil-cpp/absl/log/internal/log_sink_set.cc b/abseil-cpp/absl/log/internal/log_sink_set.cc
new file mode 100644
index 0000000..b7cbe36
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_sink_set.cc
@@ -0,0 +1,296 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/log_sink_set.h"
+
+#ifndef ABSL_HAVE_THREAD_LOCAL
+#include <pthread.h>
+#endif
+
+#ifdef __ANDROID__
+#include <android/log.h>
+#endif
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <algorithm>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/cleanup/cleanup.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/config.h"
+#include "absl/log/internal/globals.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+#include "absl/strings/string_view.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+namespace {
+
+// Returns a mutable reference to a thread-local variable that should be true if
+// a globally-registered `LogSink`'s `Send()` is currently being invoked on this
+// thread.
+bool& ThreadIsLoggingStatus() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+  ABSL_CONST_INIT thread_local bool thread_is_logging = false;
+  return thread_is_logging;
+#else
+  ABSL_CONST_INIT static pthread_key_t thread_is_logging_key;
+  static const bool unused = [] {
+    if (pthread_key_create(&thread_is_logging_key, [](void* data) {
+          delete reinterpret_cast<bool*>(data);
+        })) {
+      perror("pthread_key_create failed!");
+      abort();
+    }
+    return true;
+  }();
+  (void)unused;  // Fixes -Wunused-variable warning
+  bool* thread_is_logging_ptr =
+      reinterpret_cast<bool*>(pthread_getspecific(thread_is_logging_key));
+
+  if (ABSL_PREDICT_FALSE(!thread_is_logging_ptr)) {
+    thread_is_logging_ptr = new bool{false};
+    if (pthread_setspecific(thread_is_logging_key, thread_is_logging_ptr)) {
+      perror("pthread_setspecific failed");
+      abort();
+    }
+  }
+  return *thread_is_logging_ptr;
+#endif
+}
+
+class StderrLogSink final : public LogSink {
+ public:
+  ~StderrLogSink() override = default;
+
+  void Send(const absl::LogEntry& entry) override {
+    if (entry.log_severity() < absl::StderrThreshold() &&
+        absl::log_internal::IsInitialized()) {
+      return;
+    }
+
+    ABSL_CONST_INIT static absl::once_flag warn_if_not_initialized;
+    absl::call_once(warn_if_not_initialized, []() {
+      if (absl::log_internal::IsInitialized()) return;
+      const char w[] =
+          "WARNING: All log messages before absl::InitializeLog() is called"
+          " are written to STDERR\n";
+      absl::log_internal::WriteToStderr(w, absl::LogSeverity::kWarning);
+    });
+
+    if (!entry.stacktrace().empty()) {
+      absl::log_internal::WriteToStderr(entry.stacktrace(),
+                                        entry.log_severity());
+    } else {
+      // TODO(b/226937039): do this outside else condition once we avoid
+      // ReprintFatalMessage
+      absl::log_internal::WriteToStderr(
+          entry.text_message_with_prefix_and_newline(), entry.log_severity());
+    }
+  }
+};
+
+#if defined(__ANDROID__)
+class AndroidLogSink final : public LogSink {
+ public:
+  ~AndroidLogSink() override = default;
+
+  void Send(const absl::LogEntry& entry) override {
+    const int level = AndroidLogLevel(entry);
+    const char* const tag = GetAndroidNativeTag();
+    __android_log_write(level, tag,
+                        entry.text_message_with_prefix_and_newline_c_str());
+    if (entry.log_severity() == absl::LogSeverity::kFatal)
+      __android_log_write(ANDROID_LOG_FATAL, tag, "terminating.\n");
+  }
+
+ private:
+  static int AndroidLogLevel(const absl::LogEntry& entry) {
+    switch (entry.log_severity()) {
+      case absl::LogSeverity::kFatal:
+        return ANDROID_LOG_FATAL;
+      case absl::LogSeverity::kError:
+        return ANDROID_LOG_ERROR;
+      case absl::LogSeverity::kWarning:
+        return ANDROID_LOG_WARN;
+      default:
+        if (entry.verbosity() >= 2) return ANDROID_LOG_VERBOSE;
+        if (entry.verbosity() == 1) return ANDROID_LOG_DEBUG;
+        return ANDROID_LOG_INFO;
+    }
+  }
+};
+#endif  // defined(__ANDROID__)
+
+#if defined(_WIN32)
+class WindowsDebuggerLogSink final : public LogSink {
+ public:
+  ~WindowsDebuggerLogSink() override = default;
+
+  void Send(const absl::LogEntry& entry) override {
+    if (entry.log_severity() < absl::StderrThreshold() &&
+        absl::log_internal::IsInitialized()) {
+      return;
+    }
+    ::OutputDebugStringA(entry.text_message_with_prefix_and_newline_c_str());
+  }
+};
+#endif  // defined(_WIN32)
+
+class GlobalLogSinkSet final {
+ public:
+  GlobalLogSinkSet() {
+#if defined(__myriad2__) || defined(__Fuchsia__)
+    // myriad2 and Fuchsia do not log to stderr by default.
+#else
+    static StderrLogSink* stderr_log_sink = new StderrLogSink;
+    AddLogSink(stderr_log_sink);
+#endif
+#ifdef __ANDROID__
+    static AndroidLogSink* android_log_sink = new AndroidLogSink;
+    AddLogSink(android_log_sink);
+#endif
+#if defined(_WIN32)
+    static WindowsDebuggerLogSink* debugger_log_sink =
+        new WindowsDebuggerLogSink;
+    AddLogSink(debugger_log_sink);
+#endif  // defined(_WIN32)
+  }
+
+  void LogToSinks(const absl::LogEntry& entry,
+                  absl::Span<absl::LogSink*> extra_sinks, bool extra_sinks_only)
+      ABSL_LOCKS_EXCLUDED(guard_) {
+    SendToSinks(entry, extra_sinks);
+
+    if (!extra_sinks_only) {
+      if (ThreadIsLoggingToLogSink()) {
+        absl::log_internal::WriteToStderr(
+            entry.text_message_with_prefix_and_newline(), entry.log_severity());
+      } else {
+        absl::ReaderMutexLock global_sinks_lock(&guard_);
+        ThreadIsLoggingStatus() = true;
+        // Ensure the "thread is logging" status is reverted upon leaving the
+        // scope even in case of exceptions.
+        auto status_cleanup =
+            absl::MakeCleanup([] { ThreadIsLoggingStatus() = false; });
+        SendToSinks(entry, absl::MakeSpan(sinks_));
+      }
+    }
+  }
+
+  void AddLogSink(absl::LogSink* sink) ABSL_LOCKS_EXCLUDED(guard_) {
+    {
+      absl::WriterMutexLock global_sinks_lock(&guard_);
+      auto pos = std::find(sinks_.begin(), sinks_.end(), sink);
+      if (pos == sinks_.end()) {
+        sinks_.push_back(sink);
+        return;
+      }
+    }
+    ABSL_INTERNAL_LOG(FATAL, "Duplicate log sinks are not supported");
+  }
+
+  void RemoveLogSink(absl::LogSink* sink) ABSL_LOCKS_EXCLUDED(guard_) {
+    {
+      absl::WriterMutexLock global_sinks_lock(&guard_);
+      auto pos = std::find(sinks_.begin(), sinks_.end(), sink);
+      if (pos != sinks_.end()) {
+        sinks_.erase(pos);
+        return;
+      }
+    }
+    ABSL_INTERNAL_LOG(FATAL, "Mismatched log sink being removed");
+  }
+
+  void FlushLogSinks() ABSL_LOCKS_EXCLUDED(guard_) {
+    if (ThreadIsLoggingToLogSink()) {
+      // The thread_local condition demonstrates that we're already holding the
+      // lock in order to iterate over `sinks_` for dispatch.  The thread-safety
+      // annotations don't know this, so we assert that the lock is held.
+      guard_.AssertReaderHeld();
+      FlushLogSinksLocked();
+    } else {
+      absl::ReaderMutexLock global_sinks_lock(&guard_);
+      // In case a LogSink::Flush override decides to log
+      ThreadIsLoggingStatus() = true;
+      // Ensure the "thread is logging" status is reverted upon leaving the
+      // scope even in case of exceptions.
+      auto status_cleanup =
+          absl::MakeCleanup([] { ThreadIsLoggingStatus() = false; });
+      FlushLogSinksLocked();
+    }
+  }
+
+ private:
+  void FlushLogSinksLocked() ABSL_SHARED_LOCKS_REQUIRED(guard_) {
+    for (absl::LogSink* sink : sinks_) {
+      sink->Flush();
+    }
+  }
+
+  // Helper routine for LogToSinks.
+  static void SendToSinks(const absl::LogEntry& entry,
+                          absl::Span<absl::LogSink*> sinks) {
+    for (absl::LogSink* sink : sinks) {
+      sink->Send(entry);
+    }
+  }
+
+  using LogSinksSet = std::vector<absl::LogSink*>;
+  absl::Mutex guard_;
+  LogSinksSet sinks_ ABSL_GUARDED_BY(guard_);
+};
+
+// Returns reference to the global LogSinks set.
+GlobalLogSinkSet& GlobalSinks() {
+  static GlobalLogSinkSet* global_sinks = new GlobalLogSinkSet;
+  return *global_sinks;
+}
+
+}  // namespace
+
+bool ThreadIsLoggingToLogSink() { return ThreadIsLoggingStatus(); }
+
+void LogToSinks(const absl::LogEntry& entry,
+                absl::Span<absl::LogSink*> extra_sinks, bool extra_sinks_only) {
+  log_internal::GlobalSinks().LogToSinks(entry, extra_sinks, extra_sinks_only);
+}
+
+void AddLogSink(absl::LogSink* sink) {
+  log_internal::GlobalSinks().AddLogSink(sink);
+}
+
+void RemoveLogSink(absl::LogSink* sink) {
+  log_internal::GlobalSinks().RemoveLogSink(sink);
+}
+
+void FlushLogSinks() { log_internal::GlobalSinks().FlushLogSinks(); }
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/log_sink_set.h b/abseil-cpp/absl/log/internal/log_sink_set.h
new file mode 100644
index 0000000..88ab073
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/log_sink_set.h
@@ -0,0 +1,54 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/log_sink_set.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_LOG_INTERNAL_LOG_SINK_SET_H_
+#define ABSL_LOG_INTERNAL_LOG_SINK_SET_H_
+
+#include "absl/base/config.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// Returns true if a globally-registered `LogSink`'s `Send()` is currently
+// being invoked on this thread.
+bool ThreadIsLoggingToLogSink();
+
+// This function may log to two sets of sinks:
+//
+// * If `extra_sinks_only` is true, it will dispatch only to `extra_sinks`.
+//   `LogMessage::ToSinkAlso` and `LogMessage::ToSinkOnly` are used to attach
+//    extra sinks to the entry.
+// * Otherwise it will also log to the global sinks set. This set is managed
+//   by `absl::AddLogSink` and `absl::RemoveLogSink`.
+void LogToSinks(const absl::LogEntry& entry,
+                absl::Span<absl::LogSink*> extra_sinks, bool extra_sinks_only);
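+
+// Illustrative call (with a hypothetical `my_sink`; not part of this header):
+//   absl::LogSink* extra[] = {&my_sink};
+//   log_internal::LogToSinks(entry, absl::MakeSpan(extra),
+//                            /*extra_sinks_only=*/false);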
+
+// Implementation for operations with log sink set.
+void AddLogSink(absl::LogSink* sink);
+void RemoveLogSink(absl::LogSink* sink);
+void FlushLogSinks();
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_LOG_SINK_SET_H_
diff --git a/abseil-cpp/absl/log/internal/nullguard.cc b/abseil-cpp/absl/log/internal/nullguard.cc
new file mode 100644
index 0000000..3296c01
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/nullguard.cc
@@ -0,0 +1,35 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/nullguard.h"
+
+#include <array>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+ABSL_CONST_INIT ABSL_DLL const std::array<char, 7> kCharNull{
+    {'(', 'n', 'u', 'l', 'l', ')', '\0'}};
+ABSL_CONST_INIT ABSL_DLL const std::array<signed char, 7> kSignedCharNull{
+    {'(', 'n', 'u', 'l', 'l', ')', '\0'}};
+ABSL_CONST_INIT ABSL_DLL const std::array<unsigned char, 7> kUnsignedCharNull{
+    {'(', 'n', 'u', 'l', 'l', ')', '\0'}};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/nullguard.h b/abseil-cpp/absl/log/internal/nullguard.h
new file mode 100644
index 0000000..623943c
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/nullguard.h
@@ -0,0 +1,88 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/nullguard.h
+// -----------------------------------------------------------------------------
+//
+// NullGuard exists such that NullGuard<T>::Guard(v) returns v, unless passed a
+// nullptr_t, or a null char* or const char*, in which case it returns "(null)".
+// This allows streaming NullGuard<T>::Guard(v) to an output stream without
+// hitting undefined behavior for null values.
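+//
+// For example (illustrative):
+//   const char* p = nullptr;
+//   os << NullGuard<const char*>::Guard(p);  // prints "(null)"
+//   os << NullGuard<int>::Guard(42);         // prints "42"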
+
+#ifndef ABSL_LOG_INTERNAL_NULLGUARD_H_
+#define ABSL_LOG_INTERNAL_NULLGUARD_H_
+
+#include <array>
+#include <cstddef>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+ABSL_CONST_INIT ABSL_DLL extern const std::array<char, 7> kCharNull;
+ABSL_CONST_INIT ABSL_DLL extern const std::array<signed char, 7>
+    kSignedCharNull;
+ABSL_CONST_INIT ABSL_DLL extern const std::array<unsigned char, 7>
+    kUnsignedCharNull;
+
+template <typename T>
+struct NullGuard final {
+  static const T& Guard(const T& v) { return v; }
+};
+template <>
+struct NullGuard<char*> final {
+  static const char* Guard(const char* v) { return v ? v : kCharNull.data(); }
+};
+template <>
+struct NullGuard<const char*> final {
+  static const char* Guard(const char* v) { return v ? v : kCharNull.data(); }
+};
+template <>
+struct NullGuard<signed char*> final {
+  static const signed char* Guard(const signed char* v) {
+    return v ? v : kSignedCharNull.data();
+  }
+};
+template <>
+struct NullGuard<const signed char*> final {
+  static const signed char* Guard(const signed char* v) {
+    return v ? v : kSignedCharNull.data();
+  }
+};
+template <>
+struct NullGuard<unsigned char*> final {
+  static const unsigned char* Guard(const unsigned char* v) {
+    return v ? v : kUnsignedCharNull.data();
+  }
+};
+template <>
+struct NullGuard<const unsigned char*> final {
+  static const unsigned char* Guard(const unsigned char* v) {
+    return v ? v : kUnsignedCharNull.data();
+  }
+};
+template <>
+struct NullGuard<std::nullptr_t> final {
+  static const char* Guard(const std::nullptr_t&) { return kCharNull.data(); }
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_NULLGUARD_H_
diff --git a/abseil-cpp/absl/log/internal/nullstream.h b/abseil-cpp/absl/log/internal/nullstream.h
new file mode 100644
index 0000000..9266852
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/nullstream.h
@@ -0,0 +1,136 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/nullstream.h
+// -----------------------------------------------------------------------------
+//
+// Classes `NullStream`, `NullStreamMaybeFatal`, and `NullStreamFatal`
+// implement a subset of the `LogMessage` API and are used instead when logging
+// of messages has been disabled.
+
+#ifndef ABSL_LOG_INTERNAL_NULLSTREAM_H_
+#define ABSL_LOG_INTERNAL_NULLSTREAM_H_
+
+#ifdef _WIN32
+#include <cstdlib>
+#else
+#include <unistd.h>
+#endif
+#include <ios>
+#include <ostream>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// A `NullStream` implements the API of `LogMessage` (a few methods and
+// `operator<<`) but does nothing.  All methods are defined inline so the
+// compiler can eliminate the whole instance and discard anything that's
+// streamed in.
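+//
+// A minimal sketch (illustrative only):
+//
+//   absl::log_internal::NullStream null;
+//   null << "value: " << 42;  // compiles, but nothing is emitted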
+class NullStream {
+ public:
+  NullStream& AtLocation(absl::string_view, int) { return *this; }
+  template <typename SourceLocationType>
+  NullStream& AtLocation(SourceLocationType) {
+    return *this;
+  }
+  NullStream& NoPrefix() { return *this; }
+  NullStream& WithVerbosity(int) { return *this; }
+  template <typename TimeType>
+  NullStream& WithTimestamp(TimeType) {
+    return *this;
+  }
+  template <typename Tid>
+  NullStream& WithThreadID(Tid) {
+    return *this;
+  }
+  template <typename LogEntryType>
+  NullStream& WithMetadataFrom(const LogEntryType&) {
+    return *this;
+  }
+  NullStream& WithPerror() { return *this; }
+  template <typename LogSinkType>
+  NullStream& ToSinkAlso(LogSinkType*) {
+    return *this;
+  }
+  template <typename LogSinkType>
+  NullStream& ToSinkOnly(LogSinkType*) {
+    return *this;
+  }
+  template <typename LogSinkType>
+  NullStream& OutputToSink(LogSinkType*, bool) {
+    return *this;
+  }
+  NullStream& InternalStream() { return *this; }
+};
+template <typename T>
+inline NullStream& operator<<(NullStream& str, const T&) {
+  return str;
+}
+inline NullStream& operator<<(NullStream& str,
+                              std::ostream& (*)(std::ostream& os)) {
+  return str;
+}
+inline NullStream& operator<<(NullStream& str,
+                              std::ios_base& (*)(std::ios_base& os)) {
+  return str;
+}
+
+// `NullStreamMaybeFatal` implements the process termination semantics of
+// `LogMessage`, which is used for `DFATAL` severity and expression-defined
+// severity e.g. `LOG(LEVEL(HowBadIsIt()))`.  Like `LogMessage`, it terminates
+// the process when destroyed if the passed-in severity equals `FATAL`.
+class NullStreamMaybeFatal final : public NullStream {
+ public:
+  explicit NullStreamMaybeFatal(absl::LogSeverity severity)
+      : fatal_(severity == absl::LogSeverity::kFatal) {}
+  ~NullStreamMaybeFatal() {
+    if (fatal_) {
+      _exit(1);
+    }
+  }
+
+ private:
+  bool fatal_;
+};
+
+// `NullStreamFatal` implements the process termination semantics of
+// `LogMessageFatal`, which means it always terminates the process.  `DFATAL`
+// and expression-defined severity use `NullStreamMaybeFatal` above.
+class NullStreamFatal final : public NullStream {
+ public:
+  NullStreamFatal() = default;
+  // ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
+  // disable msvc's warning about the d'tor never returning.
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
+  ABSL_ATTRIBUTE_NORETURN ~NullStreamFatal() { _exit(1); }
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(pop)
+#endif
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_NULLSTREAM_H_
diff --git a/abseil-cpp/absl/log/internal/proto.cc b/abseil-cpp/absl/log/internal/proto.cc
new file mode 100644
index 0000000..eb699ae
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/proto.cc
@@ -0,0 +1,220 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/proto.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+namespace {
+void EncodeRawVarint(uint64_t value, size_t size, absl::Span<char> *buf) {
+  for (size_t s = 0; s < size; s++) {
+    (*buf)[s] = static_cast<char>((value & 0x7f) | (s + 1 == size ? 0 : 0x80));
+    value >>= 7;
+  }
+  buf->remove_prefix(size);
+}
+constexpr uint64_t MakeTagType(uint64_t tag, WireType type) {
+  return tag << 3 | static_cast<uint64_t>(type);
+}
+}  // namespace
+
+bool EncodeVarint(uint64_t tag, uint64_t value, absl::Span<char> *buf) {
+  const uint64_t tag_type = MakeTagType(tag, WireType::kVarint);
+  const size_t tag_type_size = VarintSize(tag_type);
+  const size_t value_size = VarintSize(value);
+  if (tag_type_size + value_size > buf->size()) {
+    buf->remove_suffix(buf->size());
+    return false;
+  }
+  EncodeRawVarint(tag_type, tag_type_size, buf);
+  EncodeRawVarint(value, value_size, buf);
+  return true;
+}
+
+bool Encode64Bit(uint64_t tag, uint64_t value, absl::Span<char> *buf) {
+  const uint64_t tag_type = MakeTagType(tag, WireType::k64Bit);
+  const size_t tag_type_size = VarintSize(tag_type);
+  if (tag_type_size + sizeof(value) > buf->size()) {
+    buf->remove_suffix(buf->size());
+    return false;
+  }
+  EncodeRawVarint(tag_type, tag_type_size, buf);
+  for (size_t s = 0; s < sizeof(value); s++) {
+    (*buf)[s] = static_cast<char>(value & 0xff);
+    value >>= 8;
+  }
+  buf->remove_prefix(sizeof(value));
+  return true;
+}
+
+bool Encode32Bit(uint64_t tag, uint32_t value, absl::Span<char> *buf) {
+  const uint64_t tag_type = MakeTagType(tag, WireType::k32Bit);
+  const size_t tag_type_size = VarintSize(tag_type);
+  if (tag_type_size + sizeof(value) > buf->size()) {
+    buf->remove_suffix(buf->size());
+    return false;
+  }
+  EncodeRawVarint(tag_type, tag_type_size, buf);
+  for (size_t s = 0; s < sizeof(value); s++) {
+    (*buf)[s] = static_cast<char>(value & 0xff);
+    value >>= 8;
+  }
+  buf->remove_prefix(sizeof(value));
+  return true;
+}
+
+bool EncodeBytes(uint64_t tag, absl::Span<const char> value,
+                 absl::Span<char> *buf) {
+  const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited);
+  const size_t tag_type_size = VarintSize(tag_type);
+  uint64_t length = value.size();
+  const size_t length_size = VarintSize(length);
+  if (tag_type_size + length_size + value.size() > buf->size()) {
+    buf->remove_suffix(buf->size());
+    return false;
+  }
+  EncodeRawVarint(tag_type, tag_type_size, buf);
+  EncodeRawVarint(length, length_size, buf);
+  memcpy(buf->data(), value.data(), value.size());
+  buf->remove_prefix(value.size());
+  return true;
+}
+
+bool EncodeBytesTruncate(uint64_t tag, absl::Span<const char> value,
+                         absl::Span<char> *buf) {
+  const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited);
+  const size_t tag_type_size = VarintSize(tag_type);
+  uint64_t length = value.size();
+  const size_t length_size =
+      VarintSize(std::min<uint64_t>(length, buf->size()));
+  if (tag_type_size + length_size <= buf->size() &&
+      tag_type_size + length_size + value.size() > buf->size()) {
+    value.remove_suffix(tag_type_size + length_size + value.size() -
+                        buf->size());
+    length = value.size();
+  }
+  if (tag_type_size + length_size + value.size() > buf->size()) {
+    buf->remove_suffix(buf->size());
+    return false;
+  }
+  EncodeRawVarint(tag_type, tag_type_size, buf);
+  EncodeRawVarint(length, length_size, buf);
+  memcpy(buf->data(), value.data(), value.size());
+  buf->remove_prefix(value.size());
+  return true;
+}
+
+ABSL_MUST_USE_RESULT absl::Span<char> EncodeMessageStart(
+    uint64_t tag, uint64_t max_size, absl::Span<char> *buf) {
+  const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited);
+  const size_t tag_type_size = VarintSize(tag_type);
+  max_size = std::min<uint64_t>(max_size, buf->size());
+  const size_t length_size = VarintSize(max_size);
+  if (tag_type_size + length_size > buf->size()) {
+    buf->remove_suffix(buf->size());
+    return absl::Span<char>();
+  }
+  EncodeRawVarint(tag_type, tag_type_size, buf);
+  const absl::Span<char> ret = buf->subspan(0, length_size);
+  EncodeRawVarint(0, length_size, buf);
+  return ret;
+}
+
+void EncodeMessageLength(absl::Span<char> msg, const absl::Span<char> *buf) {
+  if (!msg.data()) return;
+  assert(buf->data() >= msg.data());
+  if (buf->data() < msg.data()) return;
+  EncodeRawVarint(
+      static_cast<uint64_t>(buf->data() - (msg.data() + msg.size())),
+      msg.size(), &msg);
+}
+
+namespace {
+uint64_t DecodeVarint(absl::Span<const char> *buf) {
+  uint64_t value = 0;
+  size_t s = 0;
+  while (s < buf->size()) {
+    value |= static_cast<uint64_t>(static_cast<unsigned char>((*buf)[s]) & 0x7f)
+             << 7 * s;
+    if (!((*buf)[s++] & 0x80)) break;
+  }
+  buf->remove_prefix(s);
+  return value;
+}
+
+uint64_t Decode64Bit(absl::Span<const char> *buf) {
+  uint64_t value = 0;
+  size_t s = 0;
+  while (s < buf->size()) {
+    value |= static_cast<uint64_t>(static_cast<unsigned char>((*buf)[s]))
+             << 8 * s;
+    if (++s == sizeof(value)) break;
+  }
+  buf->remove_prefix(s);
+  return value;
+}
+
+uint32_t Decode32Bit(absl::Span<const char> *buf) {
+  uint32_t value = 0;
+  size_t s = 0;
+  while (s < buf->size()) {
+    value |= static_cast<uint32_t>(static_cast<unsigned char>((*buf)[s]))
+             << 8 * s;
+    if (++s == sizeof(value)) break;
+  }
+  buf->remove_prefix(s);
+  return value;
+}
+}  // namespace
+
+bool ProtoField::DecodeFrom(absl::Span<const char> *data) {
+  if (data->empty()) return false;
+  const uint64_t tag_type = DecodeVarint(data);
+  tag_ = tag_type >> 3;
+  type_ = static_cast<WireType>(tag_type & 0x07);
+  switch (type_) {
+    case WireType::kVarint:
+      value_ = DecodeVarint(data);
+      break;
+    case WireType::k64Bit:
+      value_ = Decode64Bit(data);
+      break;
+    case WireType::kLengthDelimited: {
+      value_ = DecodeVarint(data);
+      data_ = data->subspan(
+          0, static_cast<size_t>(std::min<uint64_t>(value_, data->size())));
+      data->remove_prefix(data_.size());
+      break;
+    }
+    case WireType::k32Bit:
+      value_ = Decode32Bit(data);
+      break;
+  }
+  return true;
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/proto.h b/abseil-cpp/absl/log/internal/proto.h
new file mode 100644
index 0000000..c8d14ac
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/proto.h
@@ -0,0 +1,288 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// -----------------------------------------------------------------------------
+// File: log/internal/proto.h
+// -----------------------------------------------------------------------------
+//
+// Declares functions for serializing and deserializing data to and from memory
+// buffers in protocol buffer wire format.  This library takes no steps to
+// ensure that the encoded data matches with any message specification.
+
+#ifndef ABSL_LOG_INTERNAL_PROTO_H_
+#define ABSL_LOG_INTERNAL_PROTO_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/casts.h"
+#include "absl/base/config.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// absl::Span<char> represents a view into the available space in a mutable
+// buffer during encoding.  Encoding functions shrink the span as they go so
+// that the same view can be passed to a series of Encode functions.  If the
+// data do not fit, nothing is encoded, the view is set to size zero (so that
+// all subsequent encode calls fail), and false is returned.  Otherwise true is
+// returned.
+
+// In particular, attempting to encode a series of data into an insufficient
+// buffer has consistent and efficient behavior without any caller-side error
+// checking.  Individual values will be encoded in their entirety or not at all
+// (unless one of the `Truncate` functions is used).  Once a value is omitted
+// because it does not fit, no subsequent values will be encoded to preserve
+// ordering; the decoded sequence will be a prefix of the original sequence.
+
+// There are two ways to encode a message-typed field:
+//
+// * Construct its contents in a separate buffer and use `EncodeBytes` to copy
+//   it into the primary buffer with type, tag, and length.
+// * Use `EncodeMessageStart` to write type and tag fields and reserve space for
+//   the length field, then encode the contents directly into the buffer, then
+//   use `EncodeMessageLength` to write the actual length into the reserved
+//   bytes.  This works fine if the actual length takes fewer bytes to encode
+//   than were reserved, although you don't get your extra bytes back.
+//   This approach will always produce a valid encoding, but your protocol may
+//   require that the whole message field be omitted if the buffer is too small
+//   to contain all desired subfields.  In this case, operate on a copy of the
+//   buffer view and assign back only if everything fit, i.e. if the last
+//   `Encode` call returned true.  (A sketch of this approach follows below.)
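+//
+// A sketch of the second approach (illustrative only; `storage` is a
+// hypothetical caller-provided byte array):
+//
+//   char storage[64];
+//   absl::Span<char> buf(storage, sizeof(storage));
+//   absl::Span<char> msg =
+//       EncodeMessageStart(/*tag=*/1, /*max_size=*/sizeof(storage), &buf);
+//   EncodeVarint(/*tag=*/2, /*value=*/42, &buf);
+//   EncodeString(/*tag=*/3, "hello", &buf);
+//   EncodeMessageLength(msg, &buf);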
+
+// Encodes the specified integer as a varint field and returns true if it fits.
+// Used for int32_t, int64_t, uint32_t, uint64_t, bool, and enum field types.
+// Consumes up to kMaxVarintSize * 2 bytes (20).
+bool EncodeVarint(uint64_t tag, uint64_t value, absl::Span<char> *buf);
+inline bool EncodeVarint(uint64_t tag, int64_t value, absl::Span<char> *buf) {
+  return EncodeVarint(tag, static_cast<uint64_t>(value), buf);
+}
+inline bool EncodeVarint(uint64_t tag, uint32_t value, absl::Span<char> *buf) {
+  return EncodeVarint(tag, static_cast<uint64_t>(value), buf);
+}
+inline bool EncodeVarint(uint64_t tag, int32_t value, absl::Span<char> *buf) {
+  return EncodeVarint(tag, static_cast<uint64_t>(value), buf);
+}
+
+// Encodes the specified integer as a varint field using ZigZag encoding and
+// returns true if it fits.
+// Used for sint32 and sint64 field types.
+// Consumes up to kMaxVarintSize * 2 bytes (20).
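+// ZigZag maps signed values of small magnitude to small unsigned values:
+// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and so on.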
+inline bool EncodeVarintZigZag(uint64_t tag, int64_t value,
+                               absl::Span<char> *buf) {
+  if (value < 0)
+    return EncodeVarint(tag, 2 * static_cast<uint64_t>(-(value + 1)) + 1, buf);
+  return EncodeVarint(tag, 2 * static_cast<uint64_t>(value), buf);
+}
+
+// Encodes the specified integer as a 64-bit field and returns true if it fits.
+// Used for fixed64 and sfixed64 field types.
+// Consumes up to kMaxVarintSize + 8 bytes (18).
+bool Encode64Bit(uint64_t tag, uint64_t value, absl::Span<char> *buf);
+inline bool Encode64Bit(uint64_t tag, int64_t value, absl::Span<char> *buf) {
+  return Encode64Bit(tag, static_cast<uint64_t>(value), buf);
+}
+inline bool Encode64Bit(uint64_t tag, uint32_t value, absl::Span<char> *buf) {
+  return Encode64Bit(tag, static_cast<uint64_t>(value), buf);
+}
+inline bool Encode64Bit(uint64_t tag, int32_t value, absl::Span<char> *buf) {
+  return Encode64Bit(tag, static_cast<uint64_t>(value), buf);
+}
+
+// Encodes the specified double as a 64-bit field and returns true if it fits.
+// Used for double field type.
+// Consumes up to kMaxVarintSize + 8 bytes (18).
+inline bool EncodeDouble(uint64_t tag, double value, absl::Span<char> *buf) {
+  return Encode64Bit(tag, absl::bit_cast<uint64_t>(value), buf);
+}
+
+// Encodes the specified integer as a 32-bit field and returns true if it fits.
+// Used for fixed32 and sfixed32 field types.
+// Consumes up to kMaxVarintSize + 4 bytes (14).
+bool Encode32Bit(uint64_t tag, uint32_t value, absl::Span<char> *buf);
+inline bool Encode32Bit(uint64_t tag, int32_t value, absl::Span<char> *buf) {
+  return Encode32Bit(tag, static_cast<uint32_t>(value), buf);
+}
+
+// Encodes the specified float as a 32-bit field and returns true if it fits.
+// Used for float field type.
+// Consumes up to kMaxVarintSize + 4 bytes (14).
+inline bool EncodeFloat(uint64_t tag, float value, absl::Span<char> *buf) {
+  return Encode32Bit(tag, absl::bit_cast<uint32_t>(value), buf);
+}
+
+// Encodes the specified bytes as a length-delimited field and returns true if
+// they fit.
+// Used for string, bytes, message, and packed-repeated field type.
+// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()).
+bool EncodeBytes(uint64_t tag, absl::Span<const char> value,
+                 absl::Span<char> *buf);
+
+// Encodes as many of the specified bytes as will fit as a length-delimited
+// field and returns true as long as the field header (`tag_type` and `length`)
+// fits.
+// Used for string, bytes, message, and packed-repeated field type.
+// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()).
+bool EncodeBytesTruncate(uint64_t tag, absl::Span<const char> value,
+                         absl::Span<char> *buf);
+
+// Encodes the specified string as a length-delimited field and returns true if
+// it fits.
+// Used for string, bytes, message, and packed-repeated field type.
+// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()).
+inline bool EncodeString(uint64_t tag, absl::string_view value,
+                         absl::Span<char> *buf) {
+  return EncodeBytes(tag, value, buf);
+}
+
+// Encodes as much of the specified string as will fit as a length-delimited
+// field and returns true as long as the field header (`tag_type` and `length`)
+// fits.
+// Used for string, bytes, message, and packed-repeated field type.
+// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()).
+inline bool EncodeStringTruncate(uint64_t tag, absl::string_view value,
+                                 absl::Span<char> *buf) {
+  return EncodeBytesTruncate(tag, value, buf);
+}
+
+// Encodes the header for a length-delimited field containing up to `max_size`
+// bytes or the number remaining in the buffer, whichever is less.  If the
+// header fits, a non-nullptr `Span` is returned; this must be passed to
+// `EncodeMessageLength` after all contents are encoded to finalize the length
+// field.  If the header does not fit, a nullptr `Span` is returned which is
+// safe to pass to `EncodeMessageLength` but need not be.
+// Used for string, bytes, message, and packed-repeated field type.
+// Consumes up to kMaxVarintSize * 2 bytes (20).
+ABSL_MUST_USE_RESULT absl::Span<char> EncodeMessageStart(uint64_t tag,
+                                                         uint64_t max_size,
+                                                         absl::Span<char> *buf);
+
+// Finalizes the length field in `msg` so that it encompasses all data encoded
+// since the call to `EncodeMessageStart` which returned `msg`.  Does nothing if
+// `msg` is a `nullptr` `Span`.
+void EncodeMessageLength(absl::Span<char> msg, const absl::Span<char> *buf);
+
+enum class WireType : uint64_t {
+  kVarint = 0,
+  k64Bit = 1,
+  kLengthDelimited = 2,
+  k32Bit = 5,
+};
+
+constexpr size_t VarintSize(uint64_t value) {
+  return value < 128 ? 1 : 1 + VarintSize(value >> 7);
+}
+constexpr size_t MinVarintSize() {
+  return VarintSize((std::numeric_limits<uint64_t>::min)());
+}
+constexpr size_t MaxVarintSize() {
+  return VarintSize((std::numeric_limits<uint64_t>::max)());
+}
+
+constexpr uint64_t MaxVarintForSize(size_t size) {
+  return size >= 10 ? (std::numeric_limits<uint64_t>::max)()
+                    : (static_cast<uint64_t>(1) << size * 7) - 1;
+}
+
+// `BufferSizeFor` returns a number of bytes guaranteed to be sufficient to
+// store encoded fields of the specified WireTypes regardless of tag numbers and
+// data values.  This only makes sense for `WireType::kLengthDelimited` if you
+// add in the length of the contents yourself, e.g. for string and bytes fields
+// by adding the lengths of any encoded strings to the return value or for
+// submessage fields by enumerating the fields you may encode into their
+// contents.
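+//
+// For example (illustrative), a buffer guaranteed to hold one varint field
+// plus one string field whose contents are at most 40 bytes:
+//
+//   char storage[BufferSizeFor(WireType::kVarint, WireType::kLengthDelimited) +
+//                40];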
+constexpr size_t BufferSizeFor() { return 0; }
+template <typename... T>
+constexpr size_t BufferSizeFor(WireType type, T... tail) {
+  // tag_type + data + ...
+  return MaxVarintSize() +
+         (type == WireType::kVarint ? MaxVarintSize() :              //
+              type == WireType::k64Bit ? 8 :                         //
+                  type == WireType::k32Bit ? 4 : MaxVarintSize()) +  //
+         BufferSizeFor(tail...);
+}
+
+// absl::Span<const char> represents a view into the unprocessed space in a
+// buffer during decoding.  Decoding functions shrink the span as they go so
+// that the same view can be decoded iteratively until all data are processed.
+// In general, if the buffer is exhausted but additional bytes are expected by
+// the decoder, it will return values as if the additional bytes were zeros.
+// Length-delimited fields are an exception - if the encoded length field
+// indicates more data bytes than are available in the buffer, the `bytes_value`
+// and `string_value` accessors will return truncated views.
+
+class ProtoField final {
+ public:
+  // Consumes bytes from `data` and returns true if there were any bytes to
+  // decode.
+  bool DecodeFrom(absl::Span<const char> *data);
+  uint64_t tag() const { return tag_; }
+  WireType type() const { return type_; }
+
+  // These value accessors will return nonsense if the data were not encoded in
+  // the corresponding wiretype from the corresponding C++ (or other language)
+  // type.
+
+  double double_value() const { return absl::bit_cast<double>(value_); }
+  float float_value() const {
+    return absl::bit_cast<float>(static_cast<uint32_t>(value_));
+  }
+  int32_t int32_value() const { return static_cast<int32_t>(value_); }
+  int64_t int64_value() const { return static_cast<int64_t>(value_); }
+  int32_t sint32_value() const {
+    if (value_ % 2) return static_cast<int32_t>(0 - ((value_ - 1) / 2) - 1);
+    return static_cast<int32_t>(value_ / 2);
+  }
+  int64_t sint64_value() const {
+    if (value_ % 2) return 0 - ((value_ - 1) / 2) - 1;
+    return value_ / 2;
+  }
+  uint32_t uint32_value() const { return static_cast<uint32_t>(value_); }
+  uint64_t uint64_value() const { return value_; }
+  bool bool_value() const { return value_ != 0; }
+  // To decode an enum, call int32_value() and cast to the appropriate type.
+  // Note that the official C++ proto compiler treats enum fields with values
+  // that do not correspond to a defined enumerator as unknown fields.
+
+  // To decode fields within a submessage field, call
+  // `DecodeNextField(field.BytesValue())`.
+  absl::Span<const char> bytes_value() const { return data_; }
+  absl::string_view string_value() const {
+    const auto data = bytes_value();
+    return absl::string_view(data.data(), data.size());
+  }
+  // Returns the encoded length of a length-delimited field.  This equals
+  // `bytes_value().size()` except when the latter has been truncated due to
+  // buffer underrun.
+  uint64_t encoded_length() const { return value_; }
+
+ private:
+  uint64_t tag_;
+  WireType type_;
+  // For `kTypeVarint`, `kType64Bit`, and `kType32Bit`, holds the decoded value.
+  // For `kTypeLengthDelimited`, holds the decoded length.
+  uint64_t value_;
+  absl::Span<const char> data_;
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_PROTO_H_
diff --git a/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc b/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc
new file mode 100644
index 0000000..763690d
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc
@@ -0,0 +1,105 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdlib.h>
+
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/log.h"
+
+namespace {
+using ::testing::AllOf;
+using ::testing::HasSubstr;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+MATCHER_P2(HasSubstrTimes, substr, expected_count, "") {
+  int count = 0;
+  std::string::size_type pos = 0;
+  std::string needle(substr);
+  while ((pos = arg.find(needle, pos)) != std::string::npos) {
+    ++count;
+    pos += needle.size();
+  }
+
+  return count == expected_count;
+}
+
+TEST(StderrLogSinkDeathTest, InfoMessagesInStderr) {
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo);
+        LOG(INFO) << "INFO message";
+        exit(1);
+      },
+      "INFO message");
+}
+
+TEST(StderrLogSinkDeathTest, WarningMessagesInStderr) {
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo);
+        LOG(WARNING) << "WARNING message";
+        exit(1);
+      },
+      "WARNING message");
+}
+
+TEST(StderrLogSinkDeathTest, ErrorMessagesInStderr) {
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo);
+        LOG(ERROR) << "ERROR message";
+        exit(1);
+      },
+      "ERROR message");
+}
+
+TEST(StderrLogSinkDeathTest, FatalMessagesInStderr) {
+  char message[] = "FATAL message";
+  char stacktrace[] = "*** Check failure stack trace: ***";
+
+  int expected_count = 1;
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo);
+        LOG(FATAL) << message;
+      },
+      AllOf(HasSubstrTimes(message, expected_count), HasSubstr(stacktrace)));
+}
+
+TEST(StderrLogSinkDeathTest, SecondaryFatalMessagesInStderr) {
+  auto MessageGen = []() -> std::string {
+    LOG(FATAL) << "Internal failure";
+    return "External failure";
+  };
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo);
+        LOG(FATAL) << MessageGen();
+      },
+      "Internal failure");
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/internal/strip.h b/abseil-cpp/absl/log/internal/strip.h
new file mode 100644
index 0000000..adc86ff
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/strip.h
@@ -0,0 +1,71 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/strip.h
+// -----------------------------------------------------------------------------
+//
+
+#ifndef ABSL_LOG_INTERNAL_STRIP_H_
+#define ABSL_LOG_INTERNAL_STRIP_H_
+
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/log_message.h"
+#include "absl/log/internal/nullstream.h"
+
+// `ABSL_LOGGING_INTERNAL_LOG_*` evaluates to a temporary `LogMessage` object or
+// to a related object with a compatible API but different behavior.  This set
+// of defines comes in three flavors: vanilla, plus two variants that strip some
+// logging in subtly different ways for subtly different reasons (see below).
+#if defined(STRIP_LOG) && STRIP_LOG
+#define ABSL_LOGGING_INTERNAL_LOG_INFO ::absl::log_internal::NullStream()
+#define ABSL_LOGGING_INTERNAL_LOG_WARNING ::absl::log_internal::NullStream()
+#define ABSL_LOGGING_INTERNAL_LOG_ERROR ::absl::log_internal::NullStream()
+#define ABSL_LOGGING_INTERNAL_LOG_FATAL ::absl::log_internal::NullStreamFatal()
+#define ABSL_LOGGING_INTERNAL_LOG_QFATAL ::absl::log_internal::NullStreamFatal()
+#define ABSL_LOGGING_INTERNAL_LOG_DFATAL \
+  ::absl::log_internal::NullStreamMaybeFatal(::absl::kLogDebugFatal)
+#define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
+  ::absl::log_internal::NullStreamMaybeFatal(log_internal_severity)
+#define ABSL_LOG_INTERNAL_CHECK(failure_message) ABSL_LOGGING_INTERNAL_LOG_FATAL
+#define ABSL_LOG_INTERNAL_QCHECK(failure_message) \
+  ABSL_LOGGING_INTERNAL_LOG_QFATAL
+#else  // !defined(STRIP_LOG) || !STRIP_LOG
+#define ABSL_LOGGING_INTERNAL_LOG_INFO \
+  ::absl::log_internal::LogMessage(    \
+      __FILE__, __LINE__, ::absl::log_internal::LogMessage::InfoTag{})
+#define ABSL_LOGGING_INTERNAL_LOG_WARNING \
+  ::absl::log_internal::LogMessage(       \
+      __FILE__, __LINE__, ::absl::log_internal::LogMessage::WarningTag{})
+#define ABSL_LOGGING_INTERNAL_LOG_ERROR \
+  ::absl::log_internal::LogMessage(     \
+      __FILE__, __LINE__, ::absl::log_internal::LogMessage::ErrorTag{})
+#define ABSL_LOGGING_INTERNAL_LOG_FATAL \
+  ::absl::log_internal::LogMessageFatal(__FILE__, __LINE__)
+#define ABSL_LOGGING_INTERNAL_LOG_QFATAL \
+  ::absl::log_internal::LogMessageQuietlyFatal(__FILE__, __LINE__)
+#define ABSL_LOGGING_INTERNAL_LOG_DFATAL \
+  ::absl::log_internal::LogMessage(__FILE__, __LINE__, ::absl::kLogDebugFatal)
+#define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
+  ::absl::log_internal::LogMessage(__FILE__, __LINE__, log_internal_severity)
+// These special cases dispatch to special-case constructors that allow us to
+// avoid an extra function call and shrink non-LTO binaries by a percent or so.
+#define ABSL_LOG_INTERNAL_CHECK(failure_message) \
+  ::absl::log_internal::LogMessageFatal(__FILE__, __LINE__, failure_message)
+#define ABSL_LOG_INTERNAL_QCHECK(failure_message)                  \
+  ::absl::log_internal::LogMessageQuietlyFatal(__FILE__, __LINE__, \
+                                               failure_message)
+#endif  // !defined(STRIP_LOG) || !STRIP_LOG
+
+#endif  // ABSL_LOG_INTERNAL_STRIP_H_
diff --git a/abseil-cpp/absl/log/internal/structured.h b/abseil-cpp/absl/log/internal/structured.h
new file mode 100644
index 0000000..5223dbc
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/structured.h
@@ -0,0 +1,58 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/structured.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_LOG_INTERNAL_STRUCTURED_H_
+#define ABSL_LOG_INTERNAL_STRUCTURED_H_
+
+#include <ostream>
+
+#include "absl/base/config.h"
+#include "absl/log/internal/log_message.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
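+// `AsLiteralImpl` wraps an `absl::string_view` so that, when streamed into a
+// `log_internal::LogMessage`, the text is recorded in the encoded message with
+// `StringType::kLiteral` (i.e. marked as a string literal) rather than as
+// ordinary runtime string data; when streamed into a plain `std::ostream` it
+// behaves like the wrapped view.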
+class ABSL_MUST_USE_RESULT AsLiteralImpl final {
+ public:
+  explicit AsLiteralImpl(absl::string_view str) : str_(str) {}
+  AsLiteralImpl(const AsLiteralImpl&) = default;
+  AsLiteralImpl& operator=(const AsLiteralImpl&) = default;
+
+ private:
+  absl::string_view str_;
+
+  friend std::ostream& operator<<(std::ostream& os, AsLiteralImpl as_literal) {
+    return os << as_literal.str_;
+  }
+  void AddToMessage(log_internal::LogMessage& m) {
+    m.CopyToEncodedBuffer<log_internal::LogMessage::StringType::kLiteral>(str_);
+  }
+  friend log_internal::LogMessage& operator<<(log_internal::LogMessage& m,
+                                              AsLiteralImpl as_literal) {
+    as_literal.AddToMessage(m);
+    return m;
+  }
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_STRUCTURED_H_
diff --git a/abseil-cpp/absl/log/internal/test_actions.cc b/abseil-cpp/absl/log/internal/test_actions.cc
new file mode 100644
index 0000000..bdfd637
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/test_actions.cc
@@ -0,0 +1,75 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/test_actions.h"
+
+#include <cassert>
+#include <iostream>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/strings/escaping.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+void WriteToStderrWithFilename::operator()(const absl::LogEntry& entry) const {
+  std::cerr << message << " (file: " << entry.source_filename() << ")\n";
+}
+
+void WriteEntryToStderr::operator()(const absl::LogEntry& entry) const {
+  if (!message.empty()) std::cerr << message << "\n";
+
+  const std::string source_filename = absl::CHexEscape(entry.source_filename());
+  const std::string source_basename = absl::CHexEscape(entry.source_basename());
+  const std::string text_message = absl::CHexEscape(entry.text_message());
+  const std::string encoded_message = absl::CHexEscape(entry.encoded_message());
+  std::string encoded_message_str;
+  std::cerr << "LogEntry{\n"                                               //
+            << "  source_filename: \"" << source_filename << "\"\n"        //
+            << "  source_basename: \"" << source_basename << "\"\n"        //
+            << "  source_line: " << entry.source_line() << "\n"            //
+            << "  prefix: " << (entry.prefix() ? "true\n" : "false\n")     //
+            << "  log_severity: " << entry.log_severity() << "\n"          //
+            << "  timestamp: " << entry.timestamp() << "\n"                //
+            << "  text_message: \"" << text_message << "\"\n"              //
+            << "  verbosity: " << entry.verbosity() << "\n"                //
+            << "  encoded_message (raw): \"" << encoded_message << "\"\n"  //
+            << encoded_message_str                                         //
+            << "}\n";
+}
+
+void WriteEntryToStderr::operator()(absl::LogSeverity severity,
+                                    absl::string_view filename,
+                                    absl::string_view log_message) const {
+  if (!message.empty()) std::cerr << message << "\n";
+  const std::string source_filename = absl::CHexEscape(filename);
+  const std::string text_message = absl::CHexEscape(log_message);
+  std::cerr << "LogEntry{\n"                                         //
+            << "  source_filename: \"" << source_filename << "\"\n"  //
+            << "  log_severity: " << severity << "\n"                //
+            << "  text_message: \"" << text_message << "\"\n"        //
+            << "}\n";
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/test_actions.h b/abseil-cpp/absl/log/internal/test_actions.h
new file mode 100644
index 0000000..649a050
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/test_actions.h
@@ -0,0 +1,90 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/test_actions.h
+// -----------------------------------------------------------------------------
+//
+// This file declares Googletest's actions used in the Abseil Logging library
+// unit tests.
+
+#ifndef ABSL_LOG_INTERNAL_TEST_ACTIONS_H_
+#define ABSL_LOG_INTERNAL_TEST_ACTIONS_H_
+
+#include <iostream>
+#include <ostream>
+#include <string>
+
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/log_entry.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// These actions are used by the child process in a death test.
+//
+// Expectations set in the child cannot cause test failure in the parent
+// directly.  Instead, the child can use these actions with
+// `EXPECT_CALL`/`WillOnce` and `ON_CALL`/`WillByDefault` (for unexpected calls)
+// to write messages to stderr that the parent can match against.
+struct WriteToStderr final {
+  explicit WriteToStderr(absl::string_view m) : message(m) {}
+  std::string message;
+
+  template <typename... Args>
+  void operator()(const Args&...) const {
+    std::cerr << message << std::endl;
+  }
+};
+
+struct WriteToStderrWithFilename final {
+  explicit WriteToStderrWithFilename(absl::string_view m) : message(m) {}
+
+  std::string message;
+
+  void operator()(const absl::LogEntry& entry) const;
+};
+
+struct WriteEntryToStderr final {
+  explicit WriteEntryToStderr(absl::string_view m) : message(m) {}
+
+  std::string message = "";
+
+  void operator()(const absl::LogEntry& entry) const;
+  void operator()(absl::LogSeverity, absl::string_view,
+                  absl::string_view) const;
+};
+
+// See the documentation for `DeathTestValidateExpectations` in
+// test_matchers.h.
+// `DeathTestExpectedLogging` should be used once in a given death test, and the
+// applicable severity level is the one that should be passed to
+// `DeathTestValidateExpectations`.
+inline WriteEntryToStderr DeathTestExpectedLogging() {
+  return WriteEntryToStderr{"Mock received expected entry:"};
+}
+
+// `DeathTestUnexpectedLogging` should be used zero or more times to mark
+// messages that should not hit the logs as the process dies.
+inline WriteEntryToStderr DeathTestUnexpectedLogging() {
+  return WriteEntryToStderr{"Mock received unexpected entry:"};
+}
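+
+// A hedged sketch of typical use inside a death-test child (the mock object
+// `mock_sink` and its `Log` method are hypothetical stand-ins for the test's
+// mock log sink):
+//
+//   EXPECT_CALL(mock_sink, Log).WillOnce(DeathTestExpectedLogging());
+//   ON_CALL(mock_sink, Log).WillByDefault(DeathTestUnexpectedLogging());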
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_TEST_ACTIONS_H_
diff --git a/abseil-cpp/absl/log/internal/test_helpers.cc b/abseil-cpp/absl/log/internal/test_helpers.cc
new file mode 100644
index 0000000..bfcc967
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/test_helpers.cc
@@ -0,0 +1,82 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "absl/log/internal/test_helpers.h"
+
+#ifdef __Fuchsia__
+#include <zircon/syscalls.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/globals.h"
+#include "absl/log/initialize.h"
+#include "absl/log/internal/globals.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// Returns false if the specified severity level is disabled by
+// `ABSL_MIN_LOG_LEVEL` or `absl::MinLogLevel()`.
+bool LoggingEnabledAt(absl::LogSeverity severity) {
+  return severity >= kAbslMinLogLevel && severity >= absl::MinLogLevel();
+}
+
+// -----------------------------------------------------------------------------
+// Googletest Death Test Predicates
+// -----------------------------------------------------------------------------
+
+#if GTEST_HAS_DEATH_TEST
+
+bool DiedOfFatal(int exit_status) {
+#if defined(_WIN32)
+  // Depending on NDEBUG (and possibly other build configuration), MSVC's
+  // abort results in either error code 3 (SIGABRT) or error code 0x80000003
+  // (breakpoint triggered).
+  return ::testing::ExitedWithCode(3)(exit_status & 0x7fffffff);
+#elif defined(__Fuchsia__)
+  // The Fuchsia death test implementation kill()'s the process when it detects
+  // an exception, so it should exit with the corresponding code. See
+  // FuchsiaDeathTest::Wait().
+  return ::testing::ExitedWithCode(ZX_TASK_RETCODE_SYSCALL_KILL)(exit_status);
+#elif defined(__ANDROID__) && defined(__aarch64__)
+  // These are all run under a qemu config that eats died-due-to-signal exit
+  // statuses.
+  return true;
+#else
+  return ::testing::KilledBySignal(SIGABRT)(exit_status);
+#endif
+}
+
+bool DiedOfQFatal(int exit_status) {
+  return ::testing::ExitedWithCode(1)(exit_status);
+}
+
+#endif
+
+// -----------------------------------------------------------------------------
+// Helper for Log initialization in test
+// -----------------------------------------------------------------------------
+
+void LogTestEnvironment::SetUp() {
+  if (!absl::log_internal::IsInitialized()) {
+    absl::InitializeLog();
+  }
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/test_helpers.h b/abseil-cpp/absl/log/internal/test_helpers.h
new file mode 100644
index 0000000..714bc7b
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/test_helpers.h
@@ -0,0 +1,71 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/test_helpers.h
+// -----------------------------------------------------------------------------
+//
+// This file declares testing helpers for the logging library.
+
+#ifndef ABSL_LOG_INTERNAL_TEST_HELPERS_H_
+#define ABSL_LOG_INTERNAL_TEST_HELPERS_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/globals.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// `ABSL_MIN_LOG_LEVEL` can't be used directly since it is not always defined.
+constexpr auto kAbslMinLogLevel =
+#ifdef ABSL_MIN_LOG_LEVEL
+    static_cast<absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL);
+#else
+    absl::LogSeverityAtLeast::kInfo;
+#endif
+
+// Returns false if the specified severity level is disabled by
+// `ABSL_MIN_LOG_LEVEL` or `absl::MinLogLevel()`.
+bool LoggingEnabledAt(absl::LogSeverity severity);
+
+// -----------------------------------------------------------------------------
+// Googletest Death Test Predicates
+// -----------------------------------------------------------------------------
+
+#if GTEST_HAS_DEATH_TEST
+
+bool DiedOfFatal(int exit_status);
+bool DiedOfQFatal(int exit_status);
+
+#endif
+
+// -----------------------------------------------------------------------------
+// Helper for Log initialization in test
+// -----------------------------------------------------------------------------
+
+class LogTestEnvironment : public ::testing::Environment {
+ public:
+  ~LogTestEnvironment() override = default;
+
+  void SetUp() override;
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_TEST_HELPERS_H_
diff --git a/abseil-cpp/absl/log/internal/test_matchers.cc b/abseil-cpp/absl/log/internal/test_matchers.cc
new file mode 100644
index 0000000..8c6515c
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/test_matchers.cc
@@ -0,0 +1,217 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/internal/test_matchers.h"
+
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+namespace {
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Ge;
+using ::testing::HasSubstr;
+using ::testing::MakeMatcher;
+using ::testing::Matcher;
+using ::testing::MatcherInterface;
+using ::testing::MatchResultListener;
+using ::testing::Not;
+using ::testing::Property;
+using ::testing::ResultOf;
+using ::testing::Truly;
+
+class AsStringImpl final
+    : public MatcherInterface<absl::string_view> {
+ public:
+  explicit AsStringImpl(
+      const Matcher<const std::string&>& str_matcher)
+      : str_matcher_(str_matcher) {}
+  bool MatchAndExplain(
+      absl::string_view actual,
+      MatchResultListener* listener) const override {
+    return str_matcher_.MatchAndExplain(std::string(actual), listener);
+  }
+  void DescribeTo(std::ostream* os) const override {
+    return str_matcher_.DescribeTo(os);
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    return str_matcher_.DescribeNegationTo(os);
+  }
+
+ private:
+  const Matcher<const std::string&> str_matcher_;
+};
+
+class MatchesOstreamImpl final
+    : public MatcherInterface<absl::string_view> {
+ public:
+  explicit MatchesOstreamImpl(std::string expected)
+      : expected_(std::move(expected)) {}
+  bool MatchAndExplain(absl::string_view actual,
+                       MatchResultListener*) const override {
+    return actual == expected_;
+  }
+  void DescribeTo(std::ostream* os) const override {
+    *os << "matches the contents of the ostringstream, which are \""
+        << expected_ << "\"";
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "does not match the contents of the ostringstream, which are \""
+        << expected_ << "\"";
+  }
+
+ private:
+  const std::string expected_;
+};
+}  // namespace
+
+Matcher<absl::string_view> AsString(
+    const Matcher<const std::string&>& str_matcher) {
+  return MakeMatcher(new AsStringImpl(str_matcher));
+}
+
+Matcher<const absl::LogEntry&> SourceFilename(
+    const Matcher<absl::string_view>& source_filename) {
+  return Property("source_filename", &absl::LogEntry::source_filename,
+                  source_filename);
+}
+
+Matcher<const absl::LogEntry&> SourceBasename(
+    const Matcher<absl::string_view>& source_basename) {
+  return Property("source_basename", &absl::LogEntry::source_basename,
+                  source_basename);
+}
+
+Matcher<const absl::LogEntry&> SourceLine(
+    const Matcher<int>& source_line) {
+  return Property("source_line", &absl::LogEntry::source_line, source_line);
+}
+
+Matcher<const absl::LogEntry&> Prefix(
+    const Matcher<bool>& prefix) {
+  return Property("prefix", &absl::LogEntry::prefix, prefix);
+}
+
+Matcher<const absl::LogEntry&> LogSeverity(
+    const Matcher<absl::LogSeverity>& log_severity) {
+  return Property("log_severity", &absl::LogEntry::log_severity, log_severity);
+}
+
+Matcher<const absl::LogEntry&> Timestamp(
+    const Matcher<absl::Time>& timestamp) {
+  return Property("timestamp", &absl::LogEntry::timestamp, timestamp);
+}
+
+Matcher<const absl::LogEntry&> TimestampInMatchWindow() {
+  return Property("timestamp", &absl::LogEntry::timestamp,
+                  AllOf(Ge(absl::Now()), Truly([](absl::Time arg) {
+                          return arg <= absl::Now();
+                        })));
+}
+
+Matcher<const absl::LogEntry&> ThreadID(
+    const Matcher<absl::LogEntry::tid_t>& tid) {
+  return Property("tid", &absl::LogEntry::tid, tid);
+}
+
+Matcher<const absl::LogEntry&> TextMessageWithPrefixAndNewline(
+    const Matcher<absl::string_view>&
+        text_message_with_prefix_and_newline) {
+  return Property("text_message_with_prefix_and_newline",
+                  &absl::LogEntry::text_message_with_prefix_and_newline,
+                  text_message_with_prefix_and_newline);
+}
+
+Matcher<const absl::LogEntry&> TextMessageWithPrefix(
+    const Matcher<absl::string_view>& text_message_with_prefix) {
+  return Property("text_message_with_prefix",
+                  &absl::LogEntry::text_message_with_prefix,
+                  text_message_with_prefix);
+}
+
+Matcher<const absl::LogEntry&> TextMessage(
+    const Matcher<absl::string_view>& text_message) {
+  return Property("text_message", &absl::LogEntry::text_message, text_message);
+}
+
+Matcher<const absl::LogEntry&> TextPrefix(
+    const Matcher<absl::string_view>& text_prefix) {
+  return ResultOf(
+      [](const absl::LogEntry& entry) {
+        absl::string_view msg = entry.text_message_with_prefix();
+        msg.remove_suffix(entry.text_message().size());
+        return msg;
+      },
+      text_prefix);
+}
+Matcher<const absl::LogEntry&> RawEncodedMessage(
+    const Matcher<absl::string_view>& raw_encoded_message) {
+  return Property("encoded_message", &absl::LogEntry::encoded_message,
+                  raw_encoded_message);
+}
+
+Matcher<const absl::LogEntry&> Verbosity(
+    const Matcher<int>& verbosity) {
+  return Property("verbosity", &absl::LogEntry::verbosity, verbosity);
+}
+
+Matcher<const absl::LogEntry&> Stacktrace(
+    const Matcher<absl::string_view>& stacktrace) {
+  return Property("stacktrace", &absl::LogEntry::stacktrace, stacktrace);
+}
+
+Matcher<absl::string_view> MatchesOstream(
+    const std::ostringstream& stream) {
+  return MakeMatcher(new MatchesOstreamImpl(stream.str()));
+}
+
+// We need to validate what is and isn't logged as the process dies due to
+// `FATAL`, `QFATAL`, `CHECK`, etc., but assertions inside a death test
+// subprocess don't directly affect the pass/fail status of the parent process.
+// Instead, we use the mock actions `DeathTestExpectedLogging` and
+// `DeathTestUnexpectedLogging` to write specific phrases to `stderr` that we
+// can validate in the parent process using this matcher.
+Matcher<const std::string&> DeathTestValidateExpectations() {
+  if (log_internal::LoggingEnabledAt(absl::LogSeverity::kFatal)) {
+    return Matcher<const std::string&>(
+        AllOf(HasSubstr("Mock received expected entry"),
+              Not(HasSubstr("Mock received unexpected entry"))));
+  }
+  // If `FATAL` logging is disabled, neither message should have been written.
+  return Matcher<const std::string&>(
+      AllOf(Not(HasSubstr("Mock received expected entry")),
+            Not(HasSubstr("Mock received unexpected entry"))));
+}
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/internal/test_matchers.h b/abseil-cpp/absl/log/internal/test_matchers.h
new file mode 100644
index 0000000..fc653a9
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/test_matchers.h
@@ -0,0 +1,94 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/test_matchers.h
+// -----------------------------------------------------------------------------
+//
+// This file declares Googletest's matchers used in the Abseil Logging library
+// unit tests.
+
+#ifndef ABSL_LOG_INTERNAL_TEST_MATCHERS_H_
+#define ABSL_LOG_INTERNAL_TEST_MATCHERS_H_
+
+#include <iosfwd>
+#include <sstream>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/log_entry.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+// In some configurations, Googletest's string matchers (e.g.
+// `::testing::EndsWith`) need help to match `absl::string_view`.
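+// An illustrative (hypothetical) use:
+//   TextMessage(AsString(::testing::EndsWith("world")))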
+::testing::Matcher<absl::string_view> AsString(
+    const ::testing::Matcher<const std::string&>& str_matcher);
+
+// These matchers correspond to the components of `absl::LogEntry`.
+::testing::Matcher<const absl::LogEntry&> SourceFilename(
+    const ::testing::Matcher<absl::string_view>& source_filename);
+::testing::Matcher<const absl::LogEntry&> SourceBasename(
+    const ::testing::Matcher<absl::string_view>& source_basename);
+// Be careful with this one; multi-line statements using `__LINE__` evaluate
+// differently on different platforms.  In particular, the MSVC implementation
+// of `EXPECT_DEATH` attributes the line number of the macro expansion to every
+// line within the code block that's expected to die.
+::testing::Matcher<const absl::LogEntry&> SourceLine(
+    const ::testing::Matcher<int>& source_line);
+::testing::Matcher<const absl::LogEntry&> Prefix(
+    const ::testing::Matcher<bool>& prefix);
+::testing::Matcher<const absl::LogEntry&> LogSeverity(
+    const ::testing::Matcher<absl::LogSeverity>& log_severity);
+::testing::Matcher<const absl::LogEntry&> Timestamp(
+    const ::testing::Matcher<absl::Time>& timestamp);
+// Matches if the `LogEntry`'s timestamp falls after the instantiation of this
+// matcher and before its execution, as is normal when used with EXPECT_CALL.
+::testing::Matcher<const absl::LogEntry&> TimestampInMatchWindow();
+::testing::Matcher<const absl::LogEntry&> ThreadID(
+    const ::testing::Matcher<absl::LogEntry::tid_t>&);
+::testing::Matcher<const absl::LogEntry&> TextMessageWithPrefixAndNewline(
+    const ::testing::Matcher<absl::string_view>&
+        text_message_with_prefix_and_newline);
+::testing::Matcher<const absl::LogEntry&> TextMessageWithPrefix(
+    const ::testing::Matcher<absl::string_view>& text_message_with_prefix);
+::testing::Matcher<const absl::LogEntry&> TextMessage(
+    const ::testing::Matcher<absl::string_view>& text_message);
+::testing::Matcher<const absl::LogEntry&> TextPrefix(
+    const ::testing::Matcher<absl::string_view>& text_prefix);
+::testing::Matcher<const absl::LogEntry&> Verbosity(
+    const ::testing::Matcher<int>& verbosity);
+::testing::Matcher<const absl::LogEntry&> Stacktrace(
+    const ::testing::Matcher<absl::string_view>& stacktrace);
+// Behaves as `Eq(stream.str())`, but produces better failure messages.
+::testing::Matcher<absl::string_view> MatchesOstream(
+    const std::ostringstream& stream);
+::testing::Matcher<const std::string&> DeathTestValidateExpectations();
+
+::testing::Matcher<const absl::LogEntry&> RawEncodedMessage(
+    const ::testing::Matcher<absl::string_view>& raw_encoded_message);
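+// The proto representation returned by `LogEntry::encoded_message()` is not
+// yet published, so the macro below ignores its matcher argument and accepts
+// any encoded message.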
+#define ENCODED_MESSAGE(message_matcher) ::testing::_
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_TEST_MATCHERS_H_
diff --git a/abseil-cpp/absl/log/internal/voidify.h b/abseil-cpp/absl/log/internal/voidify.h
new file mode 100644
index 0000000..8f62da2
--- /dev/null
+++ b/abseil-cpp/absl/log/internal/voidify.h
@@ -0,0 +1,44 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/voidify.h
+// -----------------------------------------------------------------------------
+//
+// This class is used to explicitly ignore values in the conditional logging
+// macros. This avoids compiler warnings like "value computed is not used" and
+// "statement has no effect".
+
+#ifndef ABSL_LOG_INTERNAL_VOIDIFY_H_
+#define ABSL_LOG_INTERNAL_VOIDIFY_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+class Voidify final {
+ public:
+  // This has to be an operator with a precedence lower than << but higher than
+  // ?:
+  template <typename T>
+  void operator&&(const T&) const&& {}
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_INTERNAL_VOIDIFY_H_
diff --git a/abseil-cpp/absl/log/log.h b/abseil-cpp/absl/log/log.h
new file mode 100644
index 0000000..602b5ac
--- /dev/null
+++ b/abseil-cpp/absl/log/log.h
@@ -0,0 +1,320 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/log.h
+// -----------------------------------------------------------------------------
+//
+// This header declares a family of LOG macros.
+//
+// Basic invocation looks like this:
+//
+//   LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// Most `LOG` macros take a severity level argument.  The severity levels are
+// `INFO`, `WARNING`, `ERROR`, and `FATAL`.  They are defined
+// in absl/base/log_severity.h.
+// * The `FATAL` severity level terminates the program with a stack trace after
+//   logging its message.  Error handlers registered with `RunOnFailure`
+//   (process_state.h) are run, but exit handlers registered with `atexit(3)`
+//   are not.
+// * The `QFATAL` pseudo-severity level is equivalent to `FATAL` but triggers
+//   quieter termination messages, e.g. without a full stack trace, and skips
+//   running registered error handlers.
+// Some preprocessor shenanigans are used to ensure that e.g. `LOG(INFO)` has
+// the same meaning even if a local symbol or preprocessor macro named `INFO` is
+// defined.  To specify a severity level using an expression instead of a
+// literal, use `LEVEL(expr)`.
+// Example:
+//
+//   LOG(LEVEL(stale ? absl::LogSeverity::kWarning : absl::LogSeverity::kInfo))
+//       << "Cookies are " << days << " days old";
+
+//
+// `LOG` macros evaluate to an unterminated statement.  The value at the end of
+// the statement supports some chainable methods:
+//
+//   * .AtLocation(absl::string_view file, int line)
+//     .AtLocation(absl::SourceLocation loc)
+//     Overrides the location inferred from the callsite.  The string pointed to
+//     by `file` must be valid until the end of the statement.
+//   * .NoPrefix()
+//     Omits the prefix from this line.  The prefix includes metadata about the
+//     logged data such as source code location and timestamp.
+//   * .WithTimestamp(absl::Time timestamp)
+//     Uses the specified timestamp instead of one collected at the time of
+//     execution.
+//   * .WithThreadID(absl::LogEntry::tid_t tid)
+//     Uses the specified thread ID instead of one collected at the time of
+//     execution.
+//   * .WithMetadataFrom(const absl::LogEntry &entry)
+//     Copies all metadata (but no data) from the specified `absl::LogEntry`.
+//     This can be used to change the severity of a message, but it has some
+//     limitations:
+//     * `ABSL_MIN_LOG_LEVEL` is evaluated against the severity passed into
+//       `LOG` (or the implicit `FATAL` level of `CHECK`).
+//     * `LOG(FATAL)` and `CHECK` terminate the process unconditionally, even if
+//       the severity is changed later.
+//     `.WithMetadataFrom(entry)` should almost always be used in combination
+//     with `LOG(LEVEL(entry.log_severity()))`.
+//   * .WithPerror()
+//     Appends to the logged message a colon, a space, a textual description of
+//     the current value of `errno` (as by `strerror(3)`), and the numerical
+//     value of `errno`.
+//   * .ToSinkAlso(absl::LogSink* sink)
+//     Sends this message to `*sink` in addition to whatever other sinks it
+//     would otherwise have been sent to.  `sink` must not be null.
+//   * .ToSinkOnly(absl::LogSink* sink)
+//     Sends this message to `*sink` and no others.  `sink` must not be null.
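+//
+//   A hypothetical chained invocation (purely illustrative) might look like:
+//
+//     LOG(WARNING).AtLocation("my_file.cc", 123).WithPerror()
+//         << "Failed to open file";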
+//
+// No interfaces in this header are async-signal-safe; their use in signal
+// handlers is unsupported and may deadlock your program or eat your lunch.
+//
+// Many logging statements are inherently conditional.  For example,
+// `LOG_IF(INFO, !foo)` does nothing if `foo` is true.  Even seemingly
+// unconditional statements like `LOG(INFO)` might be disabled at
+// compile-time to minimize binary size or for security reasons.
+//
+// * Except for the condition in a `CHECK` or `QCHECK` statement, programs must
+//   not rely on evaluation of expressions anywhere in logging statements for
+//   correctness.  For example, this is ok:
+//
+//     CHECK((fp = fopen("config.ini", "r")) != nullptr);
+//
+//   But this is probably not ok:
+//
+//     LOG(INFO) << "Server status: " << StartServerAndReturnStatusString();
+//
+//   The example below is bad too; the `i++` in the `LOG_IF` condition might
+//   not be evaluated, resulting in an infinite loop:
+//
+//     for (int i = 0; i < 1000000;)
+//       LOG_IF(INFO, i++ % 1000 == 0) << "Still working...";
+//
+// * Except where otherwise noted, conditions which cause a statement not to log
+//   also cause expressions not to be evaluated.  Programs may rely on this for
+//   performance reasons, e.g. by streaming the result of an expensive function
+//   call into a `DLOG` or `LOG_EVERY_N` statement.
+// * Care has been taken to ensure that expressions are parsed by the compiler
+//   even if they are never evaluated.  This means that syntax errors will be
+//   caught and variables will be considered used for the purposes of
+//   unused-variable diagnostics.  For example, this statement won't compile
+//   even if `INFO`-level logging has been compiled out:
+//
+//     int number_of_cakes = 40;
+//     LOG(INFO) << "Number of cakes: " << number_of_cake;  // Note the typo!
+//
+//   Similarly, this won't produce unused-variable compiler diagnostics even
+//   if `INFO`-level logging is compiled out:
+//
+//     {
+//       char fox_line1[] = "Hatee-hatee-hatee-ho!";
+//       LOG_IF(ERROR, false) << "The fox says " << fox_line1;
+//       char fox_line2[] = "A-oo-oo-oo-ooo!";
+//       LOG(INFO) << "The fox also says " << fox_line2;
+//     }
+//
+//   This error-checking is not perfect; for example, symbols that have been
+//   declared but not defined may not produce link errors if used in logging
+//   statements that compile away.
+//
+// Expressions streamed into these macros are formatted using `operator<<` just
+// as they would be if streamed into a `std::ostream`, however it should be
+// noted that their actual type is unspecified.
+//
+// To implement a custom formatting operator for a type you own, there are two
+// options: `AbslStringify()` or `std::ostream& operator<<(std::ostream&, ...)`.
+// It is recommended that users make their types loggable through
+// `AbslStringify()` as it is a universal stringification extension that also
+// enables `absl::StrFormat` and `absl::StrCat` support. If both
+// `AbslStringify()` and `std::ostream& operator<<(std::ostream&, ...)` are
+// defined, `AbslStringify()` will be used.
+//
+// To use the `AbslStringify()` API, define a friend function template in your
+// type's namespace with the following signature:
+//
+//   template <typename Sink>
+//   void AbslStringify(Sink& sink, const UserDefinedType& value);
+//
+// `Sink` has the same interface as `absl::FormatSink`, but without
+// `PutPaddedString()`.
+//
+// Example:
+//
+//   struct Point {
+//     template <typename Sink>
+//     friend void AbslStringify(Sink& sink, const Point& p) {
+//       absl::Format(&sink, "(%v, %v)", p.x, p.y);
+//     }
+//
+//     int x;
+//     int y;
+//   };
+//
+// To use `std::ostream& operator<<(std::ostream&, ...)`, define
+// `std::ostream& operator<<(std::ostream&, ...)` in your type's namespace (for
+// ADL) just as you would to stream it to `std::cout`.
+//
+// Currently `AbslStringify()` ignores output manipulators but this is not
+// guaranteed behavior and may be subject to change in the future. If you would
+// like guaranteed behavior regarding output manipulators, please use
+// `std::ostream& operator<<(std::ostream&, ...)` to make custom types loggable
+// instead.
+//
+// Those macros that support streaming honor output manipulators and `fmtflag`
+// changes that output data (e.g. `std::ends`) or control formatting of data
+// (e.g. `std::hex` and `std::fixed`); however, flushing such a stream is
+// ignored.  The message produced by a log statement is sent to registered
+// `absl::LogSink` instances at the end of the statement; those sinks are
+// responsible for their own flushing (e.g. to disk) semantics.
+//
+// Flag settings are not carried over from one `LOG` statement to the next; this
+// is a bit different than e.g. `std::cout`:
+//
+//   LOG(INFO) << std::hex << 0xdeadbeef;  // logs "0xdeadbeef"
+//   LOG(INFO) << 0xdeadbeef;              // logs "3735928559"
+
+#ifndef ABSL_LOG_LOG_H_
+#define ABSL_LOG_LOG_H_
+
+#include "absl/log/internal/log_impl.h"
+
+// LOG()
+//
+// `LOG` takes a single argument which is a severity level.  Data streamed in
+// comprise the logged message.
+// Example:
+//
+//   LOG(INFO) << "Found " << num_cookies << " cookies";
+#define LOG(severity) ABSL_LOG_INTERNAL_LOG_IMPL(_##severity)
+
+// PLOG()
+//
+// `PLOG` behaves like `LOG` except that a description of the current state of
+// `errno` is appended to the streamed message.
+#define PLOG(severity) ABSL_LOG_INTERNAL_PLOG_IMPL(_##severity)
+
+// DLOG()
+//
+// `DLOG` behaves like `LOG` in debug mode (i.e. `#ifndef NDEBUG`).  Otherwise
+// it compiles away and does nothing.  Note that `DLOG(FATAL)` does not
+// terminate the program if `NDEBUG` is defined.
+#define DLOG(severity) ABSL_LOG_INTERNAL_DLOG_IMPL(_##severity)
+
+// `LOG_IF` and friends add a second argument which specifies a condition.  If
+// the condition is false, nothing is logged.
+// Example:
+//
+//   LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+#define LOG_IF(severity, condition) \
+  ABSL_LOG_INTERNAL_LOG_IF_IMPL(_##severity, condition)
+#define PLOG_IF(severity, condition) \
+  ABSL_LOG_INTERNAL_PLOG_IF_IMPL(_##severity, condition)
+#define DLOG_IF(severity, condition) \
+  ABSL_LOG_INTERNAL_DLOG_IF_IMPL(_##severity, condition)
+
+// LOG_EVERY_N
+//
+// An instance of `LOG_EVERY_N` increments a hidden zero-initialized counter
+// every time execution passes through it and logs the specified message when
+// the counter's value is a multiple of `n`, doing nothing otherwise.  Each
+// instance has its own counter.  The counter's value can be logged by streaming
+// the symbol `COUNTER`.  `LOG_EVERY_N` is thread-safe.
+// Example:
+//
+//   LOG_EVERY_N(WARNING, 1000) << "Got a packet with a bad CRC (" << COUNTER
+//                              << " total)";
+#define LOG_EVERY_N(severity, n) \
+  ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(_##severity, n)
+
+// LOG_FIRST_N
+//
+// `LOG_FIRST_N` behaves like `LOG_EVERY_N` except that the specified message is
+// logged when the counter's value is less than `n`.  `LOG_FIRST_N` is
+// thread-safe.
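+// Example (an illustrative sketch):
+//
+//   LOG_FIRST_N(ERROR, 10) << "Dropping packet";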
+#define LOG_FIRST_N(severity, n) \
+  ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(_##severity, n)
+
+// LOG_EVERY_POW_2
+//
+// `LOG_EVERY_POW_2` behaves like `LOG_EVERY_N` except that the specified
+// message is logged when the counter's value is a power of 2.
+// `LOG_EVERY_POW_2` is thread-safe.
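+// Example (an illustrative sketch):
+//
+//   LOG_EVERY_POW_2(WARNING) << "Failed to connect; will keep retrying";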
+#define LOG_EVERY_POW_2(severity) \
+  ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(_##severity)
+
+// LOG_EVERY_N_SEC
+//
+// An instance of `LOG_EVERY_N_SEC` uses a hidden state variable to log the
+// specified message at most once every `n_seconds`.  A hidden counter of
+// executions (whether a message is logged or not) is also maintained and can be
+// logged by streaming the symbol `COUNTER`.  `LOG_EVERY_N_SEC` is thread-safe.
+// Example:
+//
+//   LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far";
+#define LOG_EVERY_N_SEC(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+
+#define PLOG_EVERY_N(severity, n) \
+  ABSL_LOG_INTERNAL_PLOG_EVERY_N_IMPL(_##severity, n)
+#define PLOG_FIRST_N(severity, n) \
+  ABSL_LOG_INTERNAL_PLOG_FIRST_N_IMPL(_##severity, n)
+#define PLOG_EVERY_POW_2(severity) \
+  ABSL_LOG_INTERNAL_PLOG_EVERY_POW_2_IMPL(_##severity)
+#define PLOG_EVERY_N_SEC(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_PLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+
+#define DLOG_EVERY_N(severity, n) \
+  ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(_##severity, n)
+#define DLOG_FIRST_N(severity, n) \
+  ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(_##severity, n)
+#define DLOG_EVERY_POW_2(severity) \
+  ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(_##severity)
+#define DLOG_EVERY_N_SEC(severity, n_seconds) \
+  ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+
+// `LOG_IF_EVERY_N` and friends behave like the corresponding `LOG_EVERY_N`
+// macros, but, as with `LOG_IF`, they neither increment the counter nor log a
+// message when the condition is false.
+// Example:
+//
+//   LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER
+//                                           << "th big cookie";
+#define LOG_IF_EVERY_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+#define LOG_IF_FIRST_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_LOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+#define LOG_IF_EVERY_POW_2(severity, condition) \
+  ABSL_LOG_INTERNAL_LOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+#define LOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
+  ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+
+#define PLOG_IF_EVERY_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+#define PLOG_IF_FIRST_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_PLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+#define PLOG_IF_EVERY_POW_2(severity, condition) \
+  ABSL_LOG_INTERNAL_PLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+#define PLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
+  ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+
+#define DLOG_IF_EVERY_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+#define DLOG_IF_FIRST_N(severity, condition, n) \
+  ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+#define DLOG_IF_EVERY_POW_2(severity, condition) \
+  ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+#define DLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
+  ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+
+#endif  // ABSL_LOG_LOG_H_
diff --git a/abseil-cpp/absl/debugging/leak_check_disable.cc b/abseil-cpp/absl/log/log_basic_test.cc
similarity index 67%
rename from abseil-cpp/absl/debugging/leak_check_disable.cc
rename to abseil-cpp/absl/log/log_basic_test.cc
index 924d6e3..7fc7111 100644
--- a/abseil-cpp/absl/debugging/leak_check_disable.cc
+++ b/abseil-cpp/absl/log/log_basic_test.cc
@@ -1,4 +1,5 @@
-// Copyright 2017 The Abseil Authors.
+//
+// Copyright 2022 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,9 +13,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Disable LeakSanitizer when this file is linked in.
-// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
-extern "C" int __lsan_is_turned_off();
-extern "C" int __lsan_is_turned_off() {
-  return 1;
-}
+#include "absl/log/log.h"
+
+#define ABSL_TEST_LOG LOG
+
+#include "gtest/gtest.h"
+#include "absl/log/log_basic_test_impl.inc"
diff --git a/abseil-cpp/absl/log/log_basic_test_impl.inc b/abseil-cpp/absl/log/log_basic_test_impl.inc
new file mode 100644
index 0000000..f340009
--- /dev/null
+++ b/abseil-cpp/absl/log/log_basic_test_impl.inc
@@ -0,0 +1,455 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The test cases in this file are expected to pass or be skipped with any
+// value of `ABSL_MIN_LOG_LEVEL`.
+
+#ifndef ABSL_LOG_LOG_BASIC_TEST_IMPL_H_
+#define ABSL_LOG_LOG_BASIC_TEST_IMPL_H_
+
+// Verify that both sets of macros behave identically by parameterizing the
+// entire test file.
+#ifndef ABSL_TEST_LOG
+#error ABSL_TEST_LOG must be defined for these tests to work.
+#endif
+
+#include <cerrno>
+#include <sstream>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/test_actions.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/scoped_mock_log.h"
+
+namespace absl_log_internal {
+#if GTEST_HAS_DEATH_TEST
+using ::absl::log_internal::DeathTestExpectedLogging;
+using ::absl::log_internal::DeathTestUnexpectedLogging;
+using ::absl::log_internal::DeathTestValidateExpectations;
+using ::absl::log_internal::DiedOfFatal;
+using ::absl::log_internal::DiedOfQFatal;
+#endif
+using ::absl::log_internal::LoggingEnabledAt;
+using ::absl::log_internal::LogSeverity;
+using ::absl::log_internal::Prefix;
+using ::absl::log_internal::SourceBasename;
+using ::absl::log_internal::SourceFilename;
+using ::absl::log_internal::SourceLine;
+using ::absl::log_internal::Stacktrace;
+using ::absl::log_internal::TextMessage;
+using ::absl::log_internal::ThreadID;
+using ::absl::log_internal::TimestampInMatchWindow;
+using ::absl::log_internal::Verbosity;
+using ::testing::AnyNumber;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::IsTrue;
+
+class BasicLogTest : public testing::TestWithParam<absl::LogSeverityAtLeast> {};
+
+std::string ThresholdName(
+    testing::TestParamInfo<absl::LogSeverityAtLeast> severity) {
+  std::stringstream ostr;
+  ostr << severity.param;
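+  // The streamed form of a threshold is expected to look like ">=INFO"
+  // ("INFINITY" for kInfinity); stripping the leading ">=" yields a name
+  // usable as a test parameter suffix.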
+  return ostr.str().substr(
+      severity.param == absl::LogSeverityAtLeast::kInfinity ? 0 : 2);
+}
+
+INSTANTIATE_TEST_SUITE_P(WithParam, BasicLogTest,
+                         testing::Values(absl::LogSeverityAtLeast::kInfo,
+                                         absl::LogSeverityAtLeast::kWarning,
+                                         absl::LogSeverityAtLeast::kError,
+                                         absl::LogSeverityAtLeast::kFatal,
+                                         absl::LogSeverityAtLeast::kInfinity),
+                         ThresholdName);
+
+TEST_P(BasicLogTest, Info) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { ABSL_TEST_LOG(INFO) << "hello world"; };
+
+  if (LoggingEnabledAt(absl::LogSeverity::kInfo)) {
+    EXPECT_CALL(
+        test_sink,
+        Send(AllOf(SourceFilename(Eq(__FILE__)),
+                   SourceBasename(Eq("log_basic_test_impl.inc")),
+                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                   LogSeverity(Eq(absl::LogSeverity::kInfo)),
+                   TimestampInMatchWindow(),
+                   ThreadID(Eq(absl::base_internal::GetTID())),
+                   TextMessage(Eq("hello world")),
+                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                      literal: "hello world"
+                                                    })pb")),
+                   Stacktrace(IsEmpty()))));
+  }
+
+  test_sink.StartCapturingLogs();
+  do_log();
+}
+
+TEST_P(BasicLogTest, Warning) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { ABSL_TEST_LOG(WARNING) << "hello world"; };
+
+  if (LoggingEnabledAt(absl::LogSeverity::kWarning)) {
+    EXPECT_CALL(
+        test_sink,
+        Send(AllOf(SourceFilename(Eq(__FILE__)),
+                   SourceBasename(Eq("log_basic_test_impl.inc")),
+                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                   LogSeverity(Eq(absl::LogSeverity::kWarning)),
+                   TimestampInMatchWindow(),
+                   ThreadID(Eq(absl::base_internal::GetTID())),
+                   TextMessage(Eq("hello world")),
+                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                      literal: "hello world"
+                                                    })pb")),
+                   Stacktrace(IsEmpty()))));
+  }
+
+  test_sink.StartCapturingLogs();
+  do_log();
+}
+
+TEST_P(BasicLogTest, Error) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { ABSL_TEST_LOG(ERROR) << "hello world"; };
+
+  if (LoggingEnabledAt(absl::LogSeverity::kError)) {
+    EXPECT_CALL(
+        test_sink,
+        Send(AllOf(SourceFilename(Eq(__FILE__)),
+                   SourceBasename(Eq("log_basic_test_impl.inc")),
+                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                   LogSeverity(Eq(absl::LogSeverity::kError)),
+                   TimestampInMatchWindow(),
+                   ThreadID(Eq(absl::base_internal::GetTID())),
+                   TextMessage(Eq("hello world")),
+                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                      literal: "hello world"
+                                                    })pb")),
+                   Stacktrace(IsEmpty()))));
+  }
+
+  test_sink.StartCapturingLogs();
+  do_log();
+}
+
+#if GTEST_HAS_DEATH_TEST
+using BasicLogDeathTest = BasicLogTest;
+
+INSTANTIATE_TEST_SUITE_P(WithParam, BasicLogDeathTest,
+                         testing::Values(absl::LogSeverityAtLeast::kInfo,
+                                         absl::LogSeverityAtLeast::kFatal,
+                                         absl::LogSeverityAtLeast::kInfinity),
+                         ThresholdName);
+
+TEST_P(BasicLogDeathTest, Fatal) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { ABSL_TEST_LOG(FATAL) << "hello world"; };
+
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink(
+            absl::MockLogDefault::kDisallowUnexpected);
+
+        EXPECT_CALL(test_sink, Send)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        ::testing::InSequence s;
+
+        // Note the logic in DeathTestValidateExpectations() caters for the case
+        // of logging being disabled at FATAL level.
+
+        if (LoggingEnabledAt(absl::LogSeverity::kFatal)) {
+          // The first call without the stack trace.
+          EXPECT_CALL(
+              test_sink,
+              Send(AllOf(SourceFilename(Eq(__FILE__)),
+                         SourceBasename(Eq("log_basic_test_impl.inc")),
+                         SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                         LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                         TimestampInMatchWindow(),
+                         ThreadID(Eq(absl::base_internal::GetTID())),
+                         TextMessage(Eq("hello world")),
+                         Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { literal: "hello world" })pb")),
+                         Stacktrace(IsEmpty()))))
+              .WillOnce(DeathTestExpectedLogging());
+
+          // The second call with the stack trace.
+          EXPECT_CALL(
+              test_sink,
+              Send(AllOf(SourceFilename(Eq(__FILE__)),
+                         SourceBasename(Eq("log_basic_test_impl.inc")),
+                         SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                         LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                         TimestampInMatchWindow(),
+                         ThreadID(Eq(absl::base_internal::GetTID())),
+                         TextMessage(Eq("hello world")),
+                         Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { literal: "hello world" })pb")),
+                         Stacktrace(Not(IsEmpty())))))
+              .WillOnce(DeathTestExpectedLogging());
+        }
+
+        test_sink.StartCapturingLogs();
+        do_log();
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+TEST_P(BasicLogDeathTest, QFatal) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { ABSL_TEST_LOG(QFATAL) << "hello world"; };
+
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink(
+            absl::MockLogDefault::kDisallowUnexpected);
+
+        EXPECT_CALL(test_sink, Send)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        if (LoggingEnabledAt(absl::LogSeverity::kFatal)) {
+          EXPECT_CALL(
+              test_sink,
+              Send(AllOf(SourceFilename(Eq(__FILE__)),
+                         SourceBasename(Eq("log_basic_test_impl.inc")),
+                         SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                         LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                         TimestampInMatchWindow(),
+                         ThreadID(Eq(absl::base_internal::GetTID())),
+                         TextMessage(Eq("hello world")),
+                         Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { literal: "hello world" })pb")),
+                         Stacktrace(IsEmpty()))))
+              .WillOnce(DeathTestExpectedLogging());
+        }
+
+        test_sink.StartCapturingLogs();
+        do_log();
+      },
+      DiedOfQFatal, DeathTestValidateExpectations());
+}
+#endif
+
+TEST_P(BasicLogTest, Level) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  for (auto severity : {absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
+                        absl::LogSeverity::kError}) {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+    const int log_line = __LINE__ + 2;
+    auto do_log = [severity] {
+      ABSL_TEST_LOG(LEVEL(severity)) << "hello world";
+    };
+
+    if (LoggingEnabledAt(severity)) {
+      EXPECT_CALL(
+          test_sink,
+          Send(AllOf(SourceFilename(Eq(__FILE__)),
+                     SourceBasename(Eq("log_basic_test_impl.inc")),
+                     SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                     LogSeverity(Eq(severity)), TimestampInMatchWindow(),
+                     ThreadID(Eq(absl::base_internal::GetTID())),
+                     TextMessage(Eq("hello world")),
+                     Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                     ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                        literal: "hello world"
+                                                      })pb")),
+                     Stacktrace(IsEmpty()))));
+    }
+    test_sink.StartCapturingLogs();
+    do_log();
+  }
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST_P(BasicLogDeathTest, Level) {
+  // TODO(b/242568884): re-enable once bug is fixed.
+  // absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  // Ensure that `severity` is not a compile-time constant to prove that
+  // `LOG(LEVEL(severity))` works regardless:
+  auto volatile severity = absl::LogSeverity::kFatal;
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [severity] { ABSL_TEST_LOG(LEVEL(severity)) << "hello world"; };
+
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink(
+            absl::MockLogDefault::kDisallowUnexpected);
+
+        EXPECT_CALL(test_sink, Send)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        ::testing::InSequence s;
+
+        if (LoggingEnabledAt(absl::LogSeverity::kFatal)) {
+          EXPECT_CALL(
+              test_sink,
+              Send(AllOf(SourceFilename(Eq(__FILE__)),
+                         SourceBasename(Eq("log_basic_test_impl.inc")),
+                         SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                         LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                         TimestampInMatchWindow(),
+                         ThreadID(Eq(absl::base_internal::GetTID())),
+                         TextMessage(Eq("hello world")),
+                         Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { literal: "hello world" })pb")),
+                         Stacktrace(IsEmpty()))))
+              .WillOnce(DeathTestExpectedLogging());
+
+          EXPECT_CALL(
+              test_sink,
+              Send(AllOf(SourceFilename(Eq(__FILE__)),
+                         SourceBasename(Eq("log_basic_test_impl.inc")),
+                         SourceLine(Eq(log_line)), Prefix(IsTrue()),
+                         LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                         TimestampInMatchWindow(),
+                         ThreadID(Eq(absl::base_internal::GetTID())),
+                         TextMessage(Eq("hello world")),
+                         Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { literal: "hello world" })pb")),
+                         Stacktrace(Not(IsEmpty())))))
+              .WillOnce(DeathTestExpectedLogging());
+        }
+
+        test_sink.StartCapturingLogs();
+        do_log();
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+#endif
+
+TEST_P(BasicLogTest, LevelClampsNegativeValues) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  if (!LoggingEnabledAt(absl::LogSeverity::kInfo)) {
+    GTEST_SKIP() << "This test cases required INFO log to be enabled";
+    return;
+  }
+
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(LogSeverity(Eq(absl::LogSeverity::kInfo))));
+
+  test_sink.StartCapturingLogs();
+  ABSL_TEST_LOG(LEVEL(-1)) << "hello world";
+}
+
+TEST_P(BasicLogTest, LevelClampsLargeValues) {
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
+
+  if (!LoggingEnabledAt(absl::LogSeverity::kError)) {
+    GTEST_SKIP() << "This test cases required ERROR log to be enabled";
+    return;
+  }
+
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(LogSeverity(Eq(absl::LogSeverity::kError))));
+
+  test_sink.StartCapturingLogs();
+  ABSL_TEST_LOG(LEVEL(static_cast<int>(absl::LogSeverity::kFatal) + 1))
+      << "hello world";
+}
+
+TEST(ErrnoPreservationTest, InSeverityExpression) {
+  errno = 77;
+  int saved_errno;
+  ABSL_TEST_LOG(LEVEL((saved_errno = errno, absl::LogSeverity::kInfo)));
+  EXPECT_THAT(saved_errno, Eq(77));
+}
+
+TEST(ErrnoPreservationTest, InStreamedExpression) {
+  if (!LoggingEnabledAt(absl::LogSeverity::kInfo)) {
+    GTEST_SKIP() << "This test cases required INFO log to be enabled";
+    return;
+  }
+
+  errno = 77;
+  int saved_errno = 0;
+  ABSL_TEST_LOG(INFO) << (saved_errno = errno, "hello world");
+  EXPECT_THAT(saved_errno, Eq(77));
+}
+
+TEST(ErrnoPreservationTest, AfterStatement) {
+  errno = 77;
+  ABSL_TEST_LOG(INFO);
+  const int saved_errno = errno;
+  EXPECT_THAT(saved_errno, Eq(77));
+}
+
+// Tests that using a variable/parameter in a logging statement suppresses
+// unused-variable/parameter warnings.
+// -----------------------------------------------------------------------
+class UnusedVariableWarningCompileTest {
+  // These four don't prove anything unless `ABSL_MIN_LOG_LEVEL` is greater than
+  // `kInfo`.
+  static void LoggedVariable() {
+    const int x = 0;
+    ABSL_TEST_LOG(INFO) << x;
+  }
+  static void LoggedParameter(const int x) { ABSL_TEST_LOG(INFO) << x; }
+  static void SeverityVariable() {
+    const int x = 0;
+    ABSL_TEST_LOG(LEVEL(x)) << "hello world";
+  }
+  static void SeverityParameter(const int x) {
+    ABSL_TEST_LOG(LEVEL(x)) << "hello world";
+  }
+};
+
+}  // namespace absl_log_internal
+
+#endif  // ABSL_LOG_LOG_BASIC_TEST_IMPL_H_
diff --git a/abseil-cpp/absl/log/log_benchmark.cc b/abseil-cpp/absl/log/log_benchmark.cc
new file mode 100644
index 0000000..45d9a5d
--- /dev/null
+++ b/abseil-cpp/absl/log/log_benchmark.cc
@@ -0,0 +1,97 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/globals.h"
+#include "absl/log/log.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+#include "absl/log/log_sink_registry.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+class NullLogSink : public absl::LogSink {
+ public:
+  NullLogSink() { absl::AddLogSink(this); }
+
+  ~NullLogSink() override { absl::RemoveLogSink(this); }
+
+  void Send(const absl::LogEntry&) override {}
+};
+
+constexpr int x = -1;
+
+void BM_SuccessfulBinaryCheck(benchmark::State& state) {
+  int n = 0;
+  while (state.KeepRunningBatch(8)) {
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    CHECK_GE(n, x);
+    ++n;
+  }
+  benchmark::DoNotOptimize(n);
+}
+BENCHMARK(BM_SuccessfulBinaryCheck);
+
+static void BM_SuccessfulUnaryCheck(benchmark::State& state) {
+  int n = 0;
+  while (state.KeepRunningBatch(8)) {
+    CHECK(n >= x);
+    CHECK(n >= x);
+    CHECK(n >= x);
+    CHECK(n >= x);
+    CHECK(n >= x);
+    CHECK(n >= x);
+    CHECK(n >= x);
+    CHECK(n >= x);
+    ++n;
+  }
+  benchmark::DoNotOptimize(n);
+}
+BENCHMARK(BM_SuccessfulUnaryCheck);
+
+static void BM_DisabledLogOverhead(benchmark::State& state) {
+  absl::ScopedStderrThreshold disable_stderr_logging(
+      absl::LogSeverityAtLeast::kInfinity);
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(
+      absl::LogSeverityAtLeast::kInfinity);
+  for (auto _ : state) {
+    LOG(INFO);
+  }
+}
+BENCHMARK(BM_DisabledLogOverhead);
+
+static void BM_EnabledLogOverhead(benchmark::State& state) {
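+  // Stderr output is suppressed while `INFO` logging stays enabled, so the
+  // loop below measures message construction and dispatch to the no-op sink
+  // rather than terminal I/O.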
+  absl::ScopedStderrThreshold stderr_logging(
+      absl::LogSeverityAtLeast::kInfinity);
+  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(
+      absl::LogSeverityAtLeast::kInfo);
+  ABSL_ATTRIBUTE_UNUSED NullLogSink null_sink;
+  for (auto _ : state) {
+    LOG(INFO);
+  }
+}
+BENCHMARK(BM_EnabledLogOverhead);
+
+}  // namespace
+
diff --git a/abseil-cpp/absl/debugging/leak_check_disable.cc b/abseil-cpp/absl/log/log_entry.cc
similarity index 62%
copy from abseil-cpp/absl/debugging/leak_check_disable.cc
copy to abseil-cpp/absl/log/log_entry.cc
index 924d6e3..19c3b3f 100644
--- a/abseil-cpp/absl/debugging/leak_check_disable.cc
+++ b/abseil-cpp/absl/log/log_entry.cc
@@ -1,4 +1,5 @@
-// Copyright 2017 The Abseil Authors.
+//
+// Copyright 2022 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,9 +13,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Disable LeakSanitizer when this file is linked in.
-// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
-extern "C" int __lsan_is_turned_off();
-extern "C" int __lsan_is_turned_off() {
-  return 1;
-}
+#include "absl/log/log_entry.h"
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
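+// Prior to C++17, static constexpr data members that are ODR-used also need
+// out-of-line definitions; this block supplies them for toolchains that
+// require them, as signaled by the macro below.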
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int LogEntry::kNoVerbosityLevel;
+constexpr int LogEntry::kNoVerboseLevel;
+#endif
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/log_entry.h b/abseil-cpp/absl/log/log_entry.h
new file mode 100644
index 0000000..9e4ae8e
--- /dev/null
+++ b/abseil-cpp/absl/log/log_entry.h
@@ -0,0 +1,220 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/log_entry.h
+// -----------------------------------------------------------------------------
+//
+// This header declares `class absl::LogEntry`, which represents a log record as
+// passed to `LogSink::Send`. Data returned by pointer or by reference or by
+// `absl::string_view` must be copied if they are needed after the lifetime of
+// the `absl::LogEntry`.
+
+#ifndef ABSL_LOG_LOG_ENTRY_H_
+#define ABSL_LOG_LOG_ENTRY_H_
+
+#include <cstddef>
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/config.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace log_internal {
+// Test only friend.
+class LogEntryTestPeer;
+class LogMessage;
+}  // namespace log_internal
+
+// LogEntry
+//
+// Represents a single entry in a log, i.e., one `LOG` statement or failed
+// `CHECK`.
+//
+// `LogEntry` is thread-compatible.
+class LogEntry final {
+ public:
+  using tid_t = log_internal::Tid;
+
+  // For non-verbose log entries, `verbosity()` returns `kNoVerbosityLevel`.
+  static constexpr int kNoVerbosityLevel = -1;
+  static constexpr int kNoVerboseLevel = -1;  // TO BE removed
+
+  // Pass `LogEntry` by reference, and do not store it as its state does not
+  // outlive the call to `LogSink::Send()`.
+  LogEntry(const LogEntry&) = delete;
+  LogEntry& operator=(const LogEntry&) = delete;
+
+  // Source file and line where the log message occurred.  Taken from `__FILE__`
+  // and `__LINE__` unless overridden by `LOG(...).AtLocation(...)`.
+  //
+  // Take special care not to use the values returned by `source_filename()` and
+  // `source_basename()` after the lifetime of the entry.  This is always
+  // incorrect, but it will often work in practice because they usually point
+  // into a statically allocated character array obtained from `__FILE__`.
+  // Statements like `LOG(INFO).AtLocation(std::string(...), ...)` will expose
+  // the bug.  If you need the data later, you must copy them.
+  absl::string_view source_filename() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return full_filename_;
+  }
+  absl::string_view source_basename() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return base_filename_;
+  }
+  int source_line() const { return line_; }
+
+  // LogEntry::prefix()
+  //
+  // True unless the metadata prefix was suppressed once by
+  // `LOG(...).NoPrefix()` or globally by `absl::EnableLogPrefix(false)`.
+  // Implies `text_message_with_prefix() == text_message()`.
+  bool prefix() const { return prefix_; }
+
+  // LogEntry::log_severity()
+  //
+  // Returns this entry's severity.  For `LOG`, taken from the first argument;
+  // for `CHECK`, always `absl::LogSeverity::kFatal`.
+  absl::LogSeverity log_severity() const { return severity_; }
+
+  // LogEntry::verbosity()
+  //
+  // Returns this entry's verbosity, or `kNoVerbosityLevel` for a non-verbose
+  // entry.  Verbosity control is not available outside of Google yet.
+  int verbosity() const { return verbose_level_; }
+
+  // LogEntry::timestamp()
+  //
+  // Returns the time at which this entry was written.  Captured during
+  // evaluation of `LOG`, but can be overridden by
+  // `LOG(...).WithTimestamp(...)`.
+  //
+  // Take care not to rely on timestamps increasing monotonically, or even to
+  // rely on timestamps having any particular relationship with reality (since
+  // they can be overridden).
+  absl::Time timestamp() const { return timestamp_; }
+
+  // LogEntry::tid()
+  //
+  // Returns the ID of the thread that wrote this entry.  Captured during
+  // evaluation of `LOG`, but can be overridden by `LOG(...).WithThreadID(...)`.
+  //
+  // Take care not to *rely* on reported thread IDs as they can be overridden as
+  // specified above.
+  tid_t tid() const { return tid_; }
+
+  // Text-formatted version of the log message.  An underlying buffer holds
+  // these contiguous data:
+  //
+  // * A prefix formed by formatting metadata (timestamp, filename, line number,
+  //   etc.)
+  //   The prefix may be empty - see `LogEntry::prefix()` - and may rarely be
+  //   truncated if the metadata are very long.
+  // * The streamed data
+  //   The data may be empty if nothing was streamed, or may be truncated to fit
+  //   the buffer.
+  // * A newline
+  // * A nul terminator
+  //
+  // The newline and nul terminator will be present even if the prefix and/or
+  // data are truncated.
+  //
+  // These methods give access to the most commonly useful substrings of the
+  // buffer's contents.  Other combinations can be obtained with substring
+  // arithmetic.
+  //
+  // The buffer does not outlive the entry; if you need the data later, you must
+  // copy them.
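+  //
+  // As an illustration (drawn from the unit tests), the buffer for a typical
+  // entry might hold
+  //
+  //   "I0102 03:04:05.678900     451 foo.cc:1234] hello world\n"
+  //
+  // followed by the nul terminator; `text_message()` is then "hello world" and
+  // `text_message_with_prefix()` is everything before the newline.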
+  absl::string_view text_message_with_prefix_and_newline() const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return absl::string_view(
+        text_message_with_prefix_and_newline_and_nul_.data(),
+        text_message_with_prefix_and_newline_and_nul_.size() - 1);
+  }
+  absl::string_view text_message_with_prefix() const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return absl::string_view(
+        text_message_with_prefix_and_newline_and_nul_.data(),
+        text_message_with_prefix_and_newline_and_nul_.size() - 2);
+  }
+  absl::string_view text_message_with_newline() const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return absl::string_view(
+        text_message_with_prefix_and_newline_and_nul_.data() + prefix_len_,
+        text_message_with_prefix_and_newline_and_nul_.size() - prefix_len_ - 1);
+  }
+  absl::string_view text_message() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return absl::string_view(
+        text_message_with_prefix_and_newline_and_nul_.data() + prefix_len_,
+        text_message_with_prefix_and_newline_and_nul_.size() - prefix_len_ - 2);
+  }
+  const char* text_message_with_prefix_and_newline_c_str() const
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return text_message_with_prefix_and_newline_and_nul_.data();
+  }
+
+  // Returns a serialized protobuf holding the operands streamed into this
+  // log message.  The message definition is not yet published.
+  //
+  // The buffer does not outlive the entry; if you need the data later, you must
+  // copy them.
+  absl::string_view encoded_message() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return encoding_;
+  }
+
+  // LogEntry::stacktrace()
+  //
+  // Optional stacktrace, e.g. for `FATAL` logs and failed `CHECK`s.
+  //
+  // Fatal entries are dispatched to each sink twice: first with all data and
+  // metadata but no stacktrace, and then with the stacktrace.  This is done
+  // because stacktrace collection is sometimes slow and fallible, and it's
+  // critical to log enough information to diagnose the failure even if the
+  // stacktrace collection hangs.
+  //
+  // The buffer does not outlive the entry; if you need the data later, you must
+  // copy them.
+  absl::string_view stacktrace() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return stacktrace_;
+  }
+
+ private:
+  LogEntry() = default;
+
+  absl::string_view full_filename_;
+  absl::string_view base_filename_;
+  int line_;
+  bool prefix_;
+  absl::LogSeverity severity_;
+  int verbose_level_;  // >=0 for `VLOG`, etc.; otherwise `kNoVerbosityLevel`.
+  absl::Time timestamp_;
+  tid_t tid_;
+  absl::Span<const char> text_message_with_prefix_and_newline_and_nul_;
+  size_t prefix_len_;
+  absl::string_view encoding_;
+  std::string stacktrace_;
+
+  friend class log_internal::LogEntryTestPeer;
+  friend class log_internal::LogMessage;
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_LOG_ENTRY_H_
diff --git a/abseil-cpp/absl/log/log_entry_test.cc b/abseil-cpp/absl/log/log_entry_test.cc
new file mode 100644
index 0000000..d9bfa1f
--- /dev/null
+++ b/abseil-cpp/absl/log/log_entry_test.cc
@@ -0,0 +1,468 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/log_entry.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cstring>
+#include <limits>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/append_truncated.h"
+#include "absl/log/internal/log_format.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/civil_time.h"
+#include "absl/time/time.h"
+#include "absl/types/span.h"
+
+namespace {
+using ::absl::log_internal::LogEntryTestPeer;
+using ::testing::Eq;
+using ::testing::IsTrue;
+using ::testing::StartsWith;
+using ::testing::StrEq;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+}  // namespace
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+class LogEntryTestPeer {
+ public:
+  LogEntryTestPeer(absl::string_view base_filename, int line, bool prefix,
+                   absl::LogSeverity severity, absl::string_view timestamp,
+                   absl::LogEntry::tid_t tid, PrefixFormat format,
+                   absl::string_view text_message)
+      : format_{format}, buf_(15000, '\0') {
+    entry_.base_filename_ = base_filename;
+    entry_.line_ = line;
+    entry_.prefix_ = prefix;
+    entry_.severity_ = severity;
+    std::string time_err;
+    EXPECT_THAT(
+        absl::ParseTime("%Y-%m-%d%ET%H:%M:%E*S", timestamp,
+                        absl::LocalTimeZone(), &entry_.timestamp_, &time_err),
+        IsTrue())
+        << "Failed to parse time " << timestamp << ": " << time_err;
+    entry_.tid_ = tid;
+    std::pair<absl::string_view, std::string> timestamp_bits =
+        absl::StrSplit(timestamp, absl::ByChar('.'));
+    EXPECT_THAT(absl::ParseCivilTime(timestamp_bits.first, &ci_.cs), IsTrue())
+        << "Failed to parse time " << timestamp_bits.first;
+    timestamp_bits.second.resize(9, '0');
+    int64_t nanos = 0;
+    EXPECT_THAT(absl::SimpleAtoi(timestamp_bits.second, &nanos), IsTrue())
+        << "Failed to parse time " << timestamp_bits.first;
+    ci_.subsecond = absl::Nanoseconds(nanos);
+
+    absl::Span<char> view = absl::MakeSpan(buf_);
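+    // Reserve the last two bytes of the buffer so that there is always room
+    // for the trailing '\n' and nul terminator written below, even if the
+    // message fills the rest of the view.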
+    view.remove_suffix(2);
+    entry_.prefix_len_ =
+        entry_.prefix_
+            ? log_internal::FormatLogPrefix(
+                  entry_.log_severity(), entry_.timestamp(), entry_.tid(),
+                  entry_.source_basename(), entry_.source_line(), format_, view)
+            : 0;
+
+    EXPECT_THAT(entry_.prefix_len_,
+                Eq(static_cast<size_t>(view.data() - buf_.data())));
+    log_internal::AppendTruncated(text_message, view);
+    view = absl::Span<char>(view.data(), view.size() + 2);
+    view[0] = '\n';
+    view[1] = '\0';
+    view.remove_prefix(2);
+    buf_.resize(static_cast<size_t>(view.data() - buf_.data()));
+    entry_.text_message_with_prefix_and_newline_and_nul_ = absl::MakeSpan(buf_);
+  }
+  LogEntryTestPeer(const LogEntryTestPeer&) = delete;
+  LogEntryTestPeer& operator=(const LogEntryTestPeer&) = delete;
+
+  std::string FormatLogMessage() const {
+    return log_internal::FormatLogMessage(
+        entry_.log_severity(), ci_.cs, ci_.subsecond, entry_.tid(),
+        entry_.source_basename(), entry_.source_line(), format_,
+        entry_.text_message());
+  }
+  std::string FormatPrefixIntoSizedBuffer(size_t sz) {
+    std::string str(sz, '\0');
+    absl::Span<char> buf(&str[0], str.size());
+    const size_t prefix_size = log_internal::FormatLogPrefix(
+        entry_.log_severity(), entry_.timestamp(), entry_.tid(),
+        entry_.source_basename(), entry_.source_line(), format_, buf);
+    EXPECT_THAT(prefix_size, Eq(static_cast<size_t>(buf.data() - str.data())));
+    str.resize(prefix_size);
+    return str;
+  }
+  const absl::LogEntry& entry() const { return entry_; }
+
+ private:
+  absl::LogEntry entry_;
+  PrefixFormat format_;
+  absl::TimeZone::CivilInfo ci_;
+  std::vector<char> buf_;
+};
+
+}  // namespace log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+namespace {
+constexpr bool kUsePrefix = true, kNoPrefix = false;
+
+TEST(LogEntryTest, Baseline) {
+  LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo,
+                         "2020-01-02T03:04:05.6789", 451,
+                         absl::log_internal::PrefixFormat::kNotRaw,
+                         "hello world");
+  EXPECT_THAT(entry.FormatLogMessage(),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] hello world"));
+  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] "));
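+  // Walk the buffer size down from "larger than the prefix" to zero (the
+  // loop ends once `sz` wraps around after 0); every size must yield a
+  // leading substring of the full prefix, i.e. truncation only drops
+  // characters from the end.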
+  for (size_t sz = strlen("I0102 03:04:05.678900     451 foo.cc:1234] ") + 20;
+       sz != std::numeric_limits<size_t>::max(); sz--)
+    EXPECT_THAT("I0102 03:04:05.678900     451 foo.cc:1234] ",
+                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] hello world\n"));
+  EXPECT_THAT(
+      entry.entry().text_message_with_prefix_and_newline_c_str(),
+      StrEq("I0102 03:04:05.678900     451 foo.cc:1234] hello world\n"));
+  EXPECT_THAT(entry.entry().text_message_with_prefix(),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] hello world"));
+  EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
+}
+
+TEST(LogEntryTest, NoPrefix) {
+  LogEntryTestPeer entry("foo.cc", 1234, kNoPrefix, absl::LogSeverity::kInfo,
+                         "2020-01-02T03:04:05.6789", 451,
+                         absl::log_internal::PrefixFormat::kNotRaw,
+                         "hello world");
+  EXPECT_THAT(entry.FormatLogMessage(),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] hello world"));
+  // These methods are not responsible for honoring `prefix()`.
+  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] "));
+  for (size_t sz = strlen("I0102 03:04:05.678900     451 foo.cc:1234] ") + 20;
+       sz != std::numeric_limits<size_t>::max(); sz--)
+    EXPECT_THAT("I0102 03:04:05.678900     451 foo.cc:1234] ",
+                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
+              Eq("hello world\n"));
+  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(),
+              StrEq("hello world\n"));
+  EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("hello world"));
+  EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
+}
+
+TEST(LogEntryTest, EmptyFields) {
+  LogEntryTestPeer entry("", 0, kUsePrefix, absl::LogSeverity::kInfo,
+                         "2020-01-02T03:04:05", 0,
+                         absl::log_internal::PrefixFormat::kNotRaw, "");
+  const std::string format_message = entry.FormatLogMessage();
+  EXPECT_THAT(format_message, Eq("I0102 03:04:05.000000       0 :0] "));
+  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq(format_message));
+  for (size_t sz = format_message.size() + 20;
+       sz != std::numeric_limits<size_t>::max(); sz--)
+    EXPECT_THAT(format_message,
+                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
+              Eq("I0102 03:04:05.000000       0 :0] \n"));
+  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(),
+              StrEq("I0102 03:04:05.000000       0 :0] \n"));
+  EXPECT_THAT(entry.entry().text_message_with_prefix(),
+              Eq("I0102 03:04:05.000000       0 :0] "));
+  EXPECT_THAT(entry.entry().text_message(), Eq(""));
+}
+
+TEST(LogEntryTest, NegativeFields) {
+  // When Abseil's minimum C++ version is C++17, this conditional can be
+  // converted to a constexpr if and the static_cast below removed.
+  if (std::is_signed<absl::LogEntry::tid_t>::value) {
+    LogEntryTestPeer entry(
+        "foo.cc", -1234, kUsePrefix, absl::LogSeverity::kInfo,
+        "2020-01-02T03:04:05.6789", static_cast<absl::LogEntry::tid_t>(-451),
+        absl::log_internal::PrefixFormat::kNotRaw, "hello world");
+    EXPECT_THAT(entry.FormatLogMessage(),
+                Eq("I0102 03:04:05.678900    -451 foo.cc:-1234] hello world"));
+    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+                Eq("I0102 03:04:05.678900    -451 foo.cc:-1234] "));
+    for (size_t sz =
+             strlen("I0102 03:04:05.678900    -451 foo.cc:-1234] ") + 20;
+         sz != std::numeric_limits<size_t>::max(); sz--)
+      EXPECT_THAT("I0102 03:04:05.678900    -451 foo.cc:-1234] ",
+                  StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline(),
+        Eq("I0102 03:04:05.678900    -451 foo.cc:-1234] hello world\n"));
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline_c_str(),
+        StrEq("I0102 03:04:05.678900    -451 foo.cc:-1234] hello world\n"));
+    EXPECT_THAT(entry.entry().text_message_with_prefix(),
+                Eq("I0102 03:04:05.678900    -451 foo.cc:-1234] hello world"));
+    EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
+  } else {
+    LogEntryTestPeer entry("foo.cc", -1234, kUsePrefix,
+                           absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789",
+                           451, absl::log_internal::PrefixFormat::kNotRaw,
+                           "hello world");
+    EXPECT_THAT(entry.FormatLogMessage(),
+                Eq("I0102 03:04:05.678900     451 foo.cc:-1234] hello world"));
+    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+                Eq("I0102 03:04:05.678900     451 foo.cc:-1234] "));
+    for (size_t sz =
+             strlen("I0102 03:04:05.678900     451 foo.cc:-1234] ") + 20;
+         sz != std::numeric_limits<size_t>::max(); sz--)
+      EXPECT_THAT("I0102 03:04:05.678900     451 foo.cc:-1234] ",
+                  StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline(),
+        Eq("I0102 03:04:05.678900     451 foo.cc:-1234] hello world\n"));
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline_c_str(),
+        StrEq("I0102 03:04:05.678900     451 foo.cc:-1234] hello world\n"));
+    EXPECT_THAT(entry.entry().text_message_with_prefix(),
+                Eq("I0102 03:04:05.678900     451 foo.cc:-1234] hello world"));
+    EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
+  }
+}
+
+TEST(LogEntryTest, LongFields) {
+  LogEntryTestPeer entry(
+      "I am the very model of a modern Major-General / "
+      "I've information vegetable, animal, and mineral.",
+      2147483647, kUsePrefix, absl::LogSeverity::kInfo,
+      "2020-01-02T03:04:05.678967896789", 2147483647,
+      absl::log_internal::PrefixFormat::kNotRaw,
+      "I know the kings of England, and I quote the fights historical / "
+      "From Marathon to Waterloo, in order categorical.");
+  EXPECT_THAT(entry.FormatLogMessage(),
+              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+                 "modern Major-General / I've information vegetable, animal, "
+                 "and mineral.:2147483647] I know the kings of England, and I "
+                 "quote the fights historical / From Marathon to Waterloo, in "
+                 "order categorical."));
+  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+                 "modern Major-General / I've information vegetable, animal, "
+                 "and mineral.:2147483647] "));
+  for (size_t sz =
+           strlen("I0102 03:04:05.678967 2147483647 I am the very model of a "
+                  "modern Major-General / I've information vegetable, animal, "
+                  "and mineral.:2147483647] ") +
+           20;
+       sz != std::numeric_limits<size_t>::max(); sz--)
+    EXPECT_THAT(
+        "I0102 03:04:05.678967 2147483647 I am the very model of a "
+        "modern Major-General / I've information vegetable, animal, "
+        "and mineral.:2147483647] ",
+        StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
+              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+                 "modern Major-General / I've information vegetable, animal, "
+                 "and mineral.:2147483647] I know the kings of England, and I "
+                 "quote the fights historical / From Marathon to Waterloo, in "
+                 "order categorical.\n"));
+  EXPECT_THAT(
+      entry.entry().text_message_with_prefix_and_newline_c_str(),
+      StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+            "modern Major-General / I've information vegetable, animal, "
+            "and mineral.:2147483647] I know the kings of England, and I "
+            "quote the fights historical / From Marathon to Waterloo, in "
+            "order categorical.\n"));
+  EXPECT_THAT(entry.entry().text_message_with_prefix(),
+              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+                 "modern Major-General / I've information vegetable, animal, "
+                 "and mineral.:2147483647] I know the kings of England, and I "
+                 "quote the fights historical / From Marathon to Waterloo, in "
+                 "order categorical."));
+  EXPECT_THAT(
+      entry.entry().text_message(),
+      Eq("I know the kings of England, and I quote the fights historical / "
+         "From Marathon to Waterloo, in order categorical."));
+}
+
+TEST(LogEntryTest, LongNegativeFields) {
+  // When Abseil's minimum C++ version is C++17, this conditional can be
+  // converted to a constexpr if and the static_cast below removed.
+  if (std::is_signed<absl::LogEntry::tid_t>::value) {
+    LogEntryTestPeer entry(
+        "I am the very model of a modern Major-General / "
+        "I've information vegetable, animal, and mineral.",
+        -2147483647, kUsePrefix, absl::LogSeverity::kInfo,
+        "2020-01-02T03:04:05.678967896789",
+        static_cast<absl::LogEntry::tid_t>(-2147483647),
+        absl::log_internal::PrefixFormat::kNotRaw,
+        "I know the kings of England, and I quote the fights historical / "
+        "From Marathon to Waterloo, in order categorical.");
+    EXPECT_THAT(
+        entry.FormatLogMessage(),
+        Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
+           "modern Major-General / I've information vegetable, animal, "
+           "and mineral.:-2147483647] I know the kings of England, and I "
+           "quote the fights historical / From Marathon to Waterloo, in "
+           "order categorical."));
+    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+                Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
+                   "modern Major-General / I've information vegetable, animal, "
+                   "and mineral.:-2147483647] "));
+    for (size_t sz =
+             strlen(
+                 "I0102 03:04:05.678967 -2147483647 I am the very model of a "
+                 "modern Major-General / I've information vegetable, animal, "
+                 "and mineral.:-2147483647] ") +
+             20;
+         sz != std::numeric_limits<size_t>::max(); sz--)
+      EXPECT_THAT(
+          "I0102 03:04:05.678967 -2147483647 I am the very model of a "
+          "modern Major-General / I've information vegetable, animal, "
+          "and mineral.:-2147483647] ",
+          StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline(),
+        Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
+           "modern Major-General / I've information vegetable, animal, "
+           "and mineral.:-2147483647] I know the kings of England, and I "
+           "quote the fights historical / From Marathon to Waterloo, in "
+           "order categorical.\n"));
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline_c_str(),
+        StrEq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
+              "modern Major-General / I've information vegetable, animal, "
+              "and mineral.:-2147483647] I know the kings of England, and I "
+              "quote the fights historical / From Marathon to Waterloo, in "
+              "order categorical.\n"));
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix(),
+        Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
+           "modern Major-General / I've information vegetable, animal, "
+           "and mineral.:-2147483647] I know the kings of England, and I "
+           "quote the fights historical / From Marathon to Waterloo, in "
+           "order categorical."));
+    EXPECT_THAT(
+        entry.entry().text_message(),
+        Eq("I know the kings of England, and I quote the fights historical / "
+           "From Marathon to Waterloo, in order categorical."));
+  } else {
+    LogEntryTestPeer entry(
+        "I am the very model of a modern Major-General / "
+        "I've information vegetable, animal, and mineral.",
+        -2147483647, kUsePrefix, absl::LogSeverity::kInfo,
+        "2020-01-02T03:04:05.678967896789", 2147483647,
+        absl::log_internal::PrefixFormat::kNotRaw,
+        "I know the kings of England, and I quote the fights historical / "
+        "From Marathon to Waterloo, in order categorical.");
+    EXPECT_THAT(
+        entry.FormatLogMessage(),
+        Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+           "modern Major-General / I've information vegetable, animal, "
+           "and mineral.:-2147483647] I know the kings of England, and I "
+           "quote the fights historical / From Marathon to Waterloo, in "
+           "order categorical."));
+    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+                Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+                   "modern Major-General / I've information vegetable, animal, "
+                   "and mineral.:-2147483647] "));
+    for (size_t sz =
+             strlen(
+                 "I0102 03:04:05.678967 2147483647 I am the very model of a "
+                 "modern Major-General / I've information vegetable, animal, "
+                 "and mineral.:-2147483647] ") +
+             20;
+         sz != std::numeric_limits<size_t>::max(); sz--)
+      EXPECT_THAT(
+          "I0102 03:04:05.678967 2147483647 I am the very model of a "
+          "modern Major-General / I've information vegetable, animal, "
+          "and mineral.:-2147483647] ",
+          StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline(),
+        Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+           "modern Major-General / I've information vegetable, animal, "
+           "and mineral.:-2147483647] I know the kings of England, and I "
+           "quote the fights historical / From Marathon to Waterloo, in "
+           "order categorical.\n"));
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix_and_newline_c_str(),
+        StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+              "modern Major-General / I've information vegetable, animal, "
+              "and mineral.:-2147483647] I know the kings of England, and I "
+              "quote the fights historical / From Marathon to Waterloo, in "
+              "order categorical.\n"));
+    EXPECT_THAT(
+        entry.entry().text_message_with_prefix(),
+        Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
+           "modern Major-General / I've information vegetable, animal, "
+           "and mineral.:-2147483647] I know the kings of England, and I "
+           "quote the fights historical / From Marathon to Waterloo, in "
+           "order categorical."));
+    EXPECT_THAT(
+        entry.entry().text_message(),
+        Eq("I know the kings of England, and I quote the fights historical / "
+           "From Marathon to Waterloo, in order categorical."));
+  }
+}
+
+TEST(LogEntryTest, Raw) {
+  LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo,
+                         "2020-01-02T03:04:05.6789", 451,
+                         absl::log_internal::PrefixFormat::kRaw, "hello world");
+  EXPECT_THAT(
+      entry.FormatLogMessage(),
+      Eq("I0102 03:04:05.678900     451 foo.cc:1234] RAW: hello world"));
+  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
+              Eq("I0102 03:04:05.678900     451 foo.cc:1234] RAW: "));
+  for (size_t sz =
+           strlen("I0102 03:04:05.678900     451 foo.cc:1234] RAW: ") + 20;
+       sz != std::numeric_limits<size_t>::max(); sz--)
+    EXPECT_THAT("I0102 03:04:05.678900     451 foo.cc:1234] RAW: ",
+                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
+
+  EXPECT_THAT(
+      entry.entry().text_message_with_prefix_and_newline(),
+      Eq("I0102 03:04:05.678900     451 foo.cc:1234] RAW: hello world\n"));
+  EXPECT_THAT(
+      entry.entry().text_message_with_prefix_and_newline_c_str(),
+      StrEq("I0102 03:04:05.678900     451 foo.cc:1234] RAW: hello world\n"));
+  EXPECT_THAT(
+      entry.entry().text_message_with_prefix(),
+      Eq("I0102 03:04:05.678900     451 foo.cc:1234] RAW: hello world"));
+  EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/log_format_test.cc b/abseil-cpp/absl/log/log_format_test.cc
new file mode 100644
index 0000000..dbad5d9
--- /dev/null
+++ b/abseil-cpp/absl/log/log_format_test.cc
@@ -0,0 +1,1872 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <math.h>
+
+#include <iomanip>
+#include <ios>
+#include <limits>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <type_traits>
+
+#ifdef __ANDROID__
+#include <android/api-level.h>
+#endif
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/log/check.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/match.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+
+namespace {
+using ::absl::log_internal::AsString;
+using ::absl::log_internal::MatchesOstream;
+using ::absl::log_internal::RawEncodedMessage;
+using ::absl::log_internal::TextMessage;
+using ::absl::log_internal::TextPrefix;
+using ::testing::AllOf;
+using ::testing::AnyOf;
+using ::testing::Each;
+using ::testing::EndsWith;
+using ::testing::Eq;
+using ::testing::Ge;
+using ::testing::IsEmpty;
+using ::testing::Le;
+using ::testing::SizeIs;
+using ::testing::Types;
+
+// Some aspects of formatting streamed data (e.g. pointer handling) are
+// implementation-defined.  Others are buggy in supported implementations.
+// These tests validate that the formatting matches that performed by a
+// `std::ostream` and also that the result is one of a list of expected formats.
+
+std::ostringstream ComparisonStream() {
+  std::ostringstream str;
+  str.setf(std::ios_base::showbase | std::ios_base::boolalpha |
+           std::ios_base::internal);
+  return str;
+}
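+
+// A minimal sketch of the pattern the tests below share (using only matchers
+// already exercised in this file): stream a value into both
+// `ComparisonStream()` and `LOG(INFO)`, then use `ScopedMockLog` to check
+// that the logged text matches the `std::ostream` rendering as well as an
+// explicitly expected string.  The typed tests that follow apply this to
+// each interesting type.
+TEST(LogFormatTest, PatternSketch) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 42;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("42")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}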
+
+TEST(LogFormatTest, NoMessage) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int log_line = __LINE__ + 1;
+  auto do_log = [] { LOG(INFO); };
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(ComparisonStream())),
+                         TextPrefix(AsString(EndsWith(absl::StrCat(
+                             " log_format_test.cc:", log_line, "] ")))),
+                         TextMessage(IsEmpty()),
+                         ENCODED_MESSAGE(EqualsProto(R"pb()pb")))));
+
+  test_sink.StartCapturingLogs();
+  do_log();
+}
+
+template <typename T>
+class CharLogFormatTest : public testing::Test {};
+using CharTypes = Types<char, signed char, unsigned char>;
+TYPED_TEST_SUITE(CharLogFormatTest, CharTypes);
+
+TYPED_TEST(CharLogFormatTest, Printable) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = 'x';
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("x")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "x" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(CharLogFormatTest, Unprintable) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  constexpr auto value = static_cast<TypeParam>(0xeeu);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("\xee")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "\xee"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+template <typename T>
+class UnsignedIntLogFormatTest : public testing::Test {};
+using UnsignedIntTypes = Types<unsigned short, unsigned int,        // NOLINT
+                               unsigned long, unsigned long long>;  // NOLINT
+TYPED_TEST_SUITE(UnsignedIntLogFormatTest, UnsignedIntTypes);
+
+TYPED_TEST(UnsignedIntLogFormatTest, Positive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = 224;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("224")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(UnsignedIntLogFormatTest, BitfieldPositive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const struct {
+    TypeParam bits : 6;
+  } value{42};
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value.bits;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("42")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "42" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value.bits;
+}
+
+template <typename T>
+class SignedIntLogFormatTest : public testing::Test {};
+using SignedIntTypes =
+    Types<signed short, signed int, signed long, signed long long>;  // NOLINT
+TYPED_TEST_SUITE(SignedIntLogFormatTest, SignedIntTypes);
+
+TYPED_TEST(SignedIntLogFormatTest, Positive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = 224;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("224")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(SignedIntLogFormatTest, Negative) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = -112;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("-112")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "-112"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(SignedIntLogFormatTest, BitfieldPositive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const struct {
+    TypeParam bits : 6;
+  } value{21};
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value.bits;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("21")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "21" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value.bits;
+}
+
+TYPED_TEST(SignedIntLogFormatTest, BitfieldNegative) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const struct {
+    TypeParam bits : 6;
+  } value{-21};
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value.bits;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("-21")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "-21" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value.bits;
+}
+
+// These test cases are disabled on GCC because its "is too small to hold all
+// values ..." warning fires on the enum bitfield tests below.
+#if !defined(__GNUC__) || defined(__clang__)
+// The implementation may choose a signed or unsigned integer type to represent
+// this enum, so it may be tested by either `UnsignedEnumLogFormatTest` or
+// `SignedEnumLogFormatTest`.
+enum MyUnsignedEnum {
+  MyUnsignedEnum_ZERO = 0,
+  MyUnsignedEnum_FORTY_TWO = 42,
+  MyUnsignedEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
+};
+enum MyUnsignedIntEnum : unsigned int {
+  MyUnsignedIntEnum_ZERO = 0,
+  MyUnsignedIntEnum_FORTY_TWO = 42,
+  MyUnsignedIntEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
+};
+
+template <typename T>
+class UnsignedEnumLogFormatTest : public testing::Test {};
+using UnsignedEnumTypes = std::conditional<
+    std::is_signed<std::underlying_type<MyUnsignedEnum>::type>::value,
+    Types<MyUnsignedIntEnum>, Types<MyUnsignedEnum, MyUnsignedIntEnum>>::type;
+TYPED_TEST_SUITE(UnsignedEnumLogFormatTest, UnsignedEnumTypes);
+
+TYPED_TEST(UnsignedEnumLogFormatTest, Positive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = static_cast<TypeParam>(224);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("224")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(UnsignedEnumLogFormatTest, BitfieldPositive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const struct {
+    TypeParam bits : 6;
+  } value{static_cast<TypeParam>(42)};
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value.bits;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("42")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "42" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value.bits;
+}
+
+enum MySignedEnum {
+  MySignedEnum_NEGATIVE_ONE_HUNDRED_TWELVE = -112,
+  MySignedEnum_NEGATIVE_TWENTY_ONE = -21,
+  MySignedEnum_ZERO = 0,
+  MySignedEnum_TWENTY_ONE = 21,
+  MySignedEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
+};
+enum MySignedIntEnum : signed int {
+  MySignedIntEnum_NEGATIVE_ONE_HUNDRED_TWELVE = -112,
+  MySignedIntEnum_NEGATIVE_TWENTY_ONE = -21,
+  MySignedIntEnum_ZERO = 0,
+  MySignedIntEnum_TWENTY_ONE = 21,
+  MySignedIntEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
+};
+
+template <typename T>
+class SignedEnumLogFormatTest : public testing::Test {};
+using SignedEnumTypes = std::conditional<
+    std::is_signed<std::underlying_type<MyUnsignedEnum>::type>::value,
+    Types<MyUnsignedEnum, MySignedEnum, MySignedIntEnum>,
+    Types<MySignedEnum, MySignedIntEnum>>::type;
+TYPED_TEST_SUITE(SignedEnumLogFormatTest, SignedEnumTypes);
+
+TYPED_TEST(SignedEnumLogFormatTest, Positive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = static_cast<TypeParam>(224);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("224")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(SignedEnumLogFormatTest, Negative) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = static_cast<TypeParam>(-112);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("-112")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "-112"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(SignedEnumLogFormatTest, BitfieldPositive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const struct {
+    TypeParam bits : 6;
+  } value{static_cast<TypeParam>(21)};
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value.bits;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("21")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "21" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value.bits;
+}
+
+TYPED_TEST(SignedEnumLogFormatTest, BitfieldNegative) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const struct {
+    TypeParam bits : 6;
+  } value{static_cast<TypeParam>(-21)};
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value.bits;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("-21")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "-21" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value.bits;
+}
+#endif  // !defined(__GNUC__) || defined(__clang__)
+
+TEST(FloatLogFormatTest, Positive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const float value = 6.02e23f;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("6.02e+23")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "6.02e+23"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(FloatLogFormatTest, Negative) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const float value = -6.02e23f;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-6.02e+23")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "-6.02e+23"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(FloatLogFormatTest, NegativeExponent) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const float value = 6.02e-23f;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("6.02e-23")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "6.02e-23"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(DoubleLogFormatTest, Positive) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 6.02e23;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("6.02e+23")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "6.02e+23"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(DoubleLogFormatTest, Negative) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = -6.02e23;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-6.02e+23")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "-6.02e+23"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(DoubleLogFormatTest, NegativeExponent) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 6.02e-23;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("6.02e-23")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "6.02e-23"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+template <typename T>
+class FloatingPointLogFormatTest : public testing::Test {};
+using FloatingPointTypes = Types<float, double>;
+TYPED_TEST_SUITE(FloatingPointLogFormatTest, FloatingPointTypes);
+
+TYPED_TEST(FloatingPointLogFormatTest, Zero) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = 0.0;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("0")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "0" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(FloatingPointLogFormatTest, Integer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = 1.0;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("1")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "1" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(FloatingPointLogFormatTest, Infinity) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = std::numeric_limits<TypeParam>::infinity();
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("inf"), Eq("Inf"))),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "inf" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(FloatingPointLogFormatTest, NegativeInfinity) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = -std::numeric_limits<TypeParam>::infinity();
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(AnyOf(Eq("-inf"), Eq("-Inf"))),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "-inf"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(FloatingPointLogFormatTest, NaN) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = std::numeric_limits<TypeParam>::quiet_NaN();
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("nan"), Eq("NaN"))),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "nan" })pb")))));
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(FloatingPointLogFormatTest, NegativeNaN) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value =
+      std::copysign(std::numeric_limits<TypeParam>::quiet_NaN(), -1.0);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(MatchesOstream(comparison_stream)),
+          TextMessage(AnyOf(Eq("-nan"), Eq("nan"), Eq("NaN"), Eq("-nan(ind)"))),
+          ENCODED_MESSAGE(
+              AnyOf(EqualsProto(R"pb(value { str: "-nan" })pb"),
+                    EqualsProto(R"pb(value { str: "nan" })pb"),
+                    EqualsProto(R"pb(value { str: "-nan(ind)" })pb"))))));
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+template <typename T>
+class VoidPtrLogFormatTest : public testing::Test {};
+using VoidPtrTypes = Types<void *, const void *>;
+TYPED_TEST_SUITE(VoidPtrLogFormatTest, VoidPtrTypes);
+
+TYPED_TEST(VoidPtrLogFormatTest, Null) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = nullptr;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("(nil)"), Eq("0"), Eq("0x0"),
+                                   Eq("00000000"), Eq("0000000000000000"))))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(VoidPtrLogFormatTest, NonNull) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = reinterpret_cast<TypeParam>(0xdeadbeefULL);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(MatchesOstream(comparison_stream)),
+          TextMessage(
+              AnyOf(Eq("0xdeadbeef"), Eq("DEADBEEF"), Eq("00000000DEADBEEF"))),
+          ENCODED_MESSAGE(AnyOf(
+              EqualsProto(R"pb(value { str: "0xdeadbeef" })pb"),
+              EqualsProto(R"pb(value { str: "00000000DEADBEEF" })pb"))))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+template <typename T>
+class VolatilePtrLogFormatTest : public testing::Test {};
+using VolatilePtrTypes =
+    Types<volatile void*, const volatile void*, volatile char*,
+          const volatile char*, volatile signed char*,
+          const volatile signed char*, volatile unsigned char*,
+          const volatile unsigned char*>;
+TYPED_TEST_SUITE(VolatilePtrLogFormatTest, VolatilePtrTypes);
+
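+// Prior to C++23, `std::ostream` has no `operator<<` overload for
+// volatile-qualified pointers, so streaming one falls back to the implicit
+// conversion to `bool`; hence the "false"/"true" expectations in these tests.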
+TYPED_TEST(VolatilePtrLogFormatTest, Null) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = nullptr;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("false")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "false"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(VolatilePtrLogFormatTest, NonNull) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const TypeParam value = reinterpret_cast<TypeParam>(0xdeadbeefLL);
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("true")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "true"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+template <typename T>
+class CharPtrLogFormatTest : public testing::Test {};
+using CharPtrTypes = Types<char, const char, signed char, const signed char,
+                           unsigned char, const unsigned char>;
+TYPED_TEST_SUITE(CharPtrLogFormatTest, CharPtrTypes);
+
+TYPED_TEST(CharPtrLogFormatTest, Null) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  // Streaming `([cv] char *)nullptr` into a `std::ostream` is UB, and some C++
+  // standard library implementations choose to crash.  We take measures to log
+  // something useful instead of crashing, even when that differs from the
+  // standard library in use (and thus the behavior of `std::ostream`).
+  TypeParam* const value = nullptr;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          // `MatchesOstream` is omitted because we intentionally differ here.
+          TextMessage(Eq("(null)")),
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(null)" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TYPED_TEST(CharPtrLogFormatTest, NonNull) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  TypeParam data[] = {'v', 'a', 'l', 'u', 'e', '\0'};
+  TypeParam* const value = data;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("value")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "value"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(BoolLogFormatTest, True) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const bool value = true;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("true")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "true"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+TEST(BoolLogFormatTest, False) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const bool value = false;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("false")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "false"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
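+// In the encoded message, a streamed string literal is recorded in the
+// `literal` field, while other values (including a mutable char array with
+// the same contents) are recorded as `str`, as the next two tests show.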
+TEST(LogFormatTest, StringLiteral) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << "value";
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("value")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            literal: "value"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << "value";
+}
+
+TEST(LogFormatTest, CharArray) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  char value[] = "value";
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("value")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "value"
+                                                             })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+class CustomClass {};
+std::ostream& operator<<(std::ostream& os, const CustomClass&) {
+  return os << "CustomClass{}";
+}
+
+TEST(LogFormatTest, Custom) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  CustomClass value;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("CustomClass{}")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "CustomClass{}"
+                                                          })pb")))));
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
+class CustomClassNonCopyable {
+ public:
+  CustomClassNonCopyable() = default;
+  CustomClassNonCopyable(const CustomClassNonCopyable&) = delete;
+  CustomClassNonCopyable& operator=(const CustomClassNonCopyable&) = delete;
+};
+std::ostream& operator<<(std::ostream& os, const CustomClassNonCopyable&) {
+  return os << "CustomClassNonCopyable{}";
+}
+
+TEST(LogFormatTest, CustomNonCopyable) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  CustomClassNonCopyable value;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("CustomClassNonCopyable{}")),
+                 ENCODED_MESSAGE(EqualsProto(
+                     R"pb(value { str: "CustomClassNonCopyable{}" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value;
+}
+
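+// `AbslStringify` is the extension point the logging library uses to format
+// user-defined types; the tests below also show that it takes precedence
+// over a matching `operator<<` when both are defined.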
+struct Point {
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, const Point& p) {
+    absl::Format(&sink, "(%d, %d)", p.x, p.y);
+  }
+
+  int x = 10;
+  int y = 20;
+};
+
+TEST(LogFormatTest, AbslStringifyExample) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  Point p;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << p;
+}
+
+struct PointWithAbslStringifyAndOstream {
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink,
+                            const PointWithAbslStringifyAndOstream& p) {
+    absl::Format(&sink, "(%d, %d)", p.x, p.y);
+  }
+
+  int x = 10;
+  int y = 20;
+};
+
+ABSL_ATTRIBUTE_UNUSED std::ostream& operator<<(
+    std::ostream& os, const PointWithAbslStringifyAndOstream&) {
+  return os << "Default to AbslStringify()";
+}
+
+TEST(LogFormatTest, CustomWithAbslStringifyAndOstream) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  PointWithAbslStringifyAndOstream p;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << p;
+}
+
+struct PointStreamsNothing {
+  template <typename Sink>
+  friend void AbslStringify(Sink&, const PointStreamsNothing&) {}
+
+  int x = 10;
+  int y = 20;
+};
+
+TEST(LogFormatTest, AbslStringifyStreamsNothing) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  PointStreamsNothing p;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(Eq("77")), TextMessage(Eq(absl::StrCat(p, 77))),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << p << 77;
+}
+
+struct PointMultipleAppend {
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, const PointMultipleAppend& p) {
+    sink.Append("(");
+    sink.Append(absl::StrCat(p.x, ", ", p.y, ")"));
+  }
+
+  int x = 10;
+  int y = 20;
+};
+
+TEST(LogFormatTest, AbslStringifyMultipleAppend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  PointMultipleAppend p;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(" }
+                                           value { str: "10, 20)" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << p;
+}
+
+TEST(ManipulatorLogFormatTest, BoolAlphaTrue) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const bool value = true;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::noboolalpha << value << " "  //
+                    << std::boolalpha << value << " "    //
+                    << std::noboolalpha << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("1 true 1")),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { str: "1" }
+                                  value { literal: " " }
+                                  value { str: "true" }
+                                  value { literal: " " }
+                                  value { str: "1" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::noboolalpha << value << " "  //
+            << std::boolalpha << value << " "    //
+            << std::noboolalpha << value;
+}
+
+TEST(ManipulatorLogFormatTest, BoolAlphaFalse) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const bool value = false;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::noboolalpha << value << " "  //
+                    << std::boolalpha << value << " "    //
+                    << std::noboolalpha << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("0 false 0")),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { str: "0" }
+                                  value { literal: " " }
+                                  value { str: "false" }
+                                  value { literal: " " }
+                                  value { str: "0" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::noboolalpha << value << " "  //
+            << std::boolalpha << value << " "    //
+            << std::noboolalpha << value;
+}
+
+TEST(ManipulatorLogFormatTest, ShowPoint) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 77.0;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::noshowpoint << value << " "  //
+                    << std::showpoint << value << " "    //
+                    << std::noshowpoint << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77 77.0000 77")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
+                                                  value { literal: " " }
+                                                  value { str: "77.0000" }
+                                                  value { literal: " " }
+                                                  value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::noshowpoint << value << " "  //
+            << std::showpoint << value << " "    //
+            << std::noshowpoint << value;
+}
+
+TEST(ManipulatorLogFormatTest, ShowPos) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::noshowpos << value << " "  //
+                    << std::showpos << value << " "    //
+                    << std::noshowpos << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77 +77 77")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
+                                                  value { literal: " " }
+                                                  value { str: "+77" }
+                                                  value { literal: " " }
+                                                  value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::noshowpos << value << " "  //
+            << std::showpos << value << " "    //
+            << std::noshowpos << value;
+}
+
+TEST(ManipulatorLogFormatTest, UppercaseFloat) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 7.7e7;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::nouppercase << value << " "  //
+                    << std::uppercase << value << " "    //
+                    << std::nouppercase << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("7.7e+07 7.7E+07 7.7e+07")),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { str: "7.7e+07" }
+                                  value { literal: " " }
+                                  value { str: "7.7E+07" }
+                                  value { literal: " " }
+                                  value { str: "7.7e+07" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::nouppercase << value << " "  //
+            << std::uppercase << value << " "    //
+            << std::nouppercase << value;
+}
+
+TEST(ManipulatorLogFormatTest, Hex) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 0x77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::hex << value;
+
+  EXPECT_CALL(
+      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                            TextMessage(Eq("0x77")),
+                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                               str: "0x77"
+                                                             })pb")))));
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hex << value;
+}
+
+TEST(ManipulatorLogFormatTest, Oct) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 077;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::oct << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("077")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "077" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::oct << value;
+}
+
+TEST(ManipulatorLogFormatTest, Dec) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::hex << std::dec << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hex << std::dec << value;
+}
+
+TEST(ManipulatorLogFormatTest, ShowbaseHex) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 0x77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::hex                         //
+                    << std::noshowbase << value << " "  //
+                    << std::showbase << value << " "    //
+                    << std::noshowbase << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77 0x77 77")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
+                                                  value { literal: " " }
+                                                  value { str: "0x77" }
+                                                  value { literal: " " }
+                                                  value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hex                         //
+            << std::noshowbase << value << " "  //
+            << std::showbase << value << " "    //
+            << std::noshowbase << value;
+}
+
+TEST(ManipulatorLogFormatTest, ShowbaseOct) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 077;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::oct                         //
+                    << std::noshowbase << value << " "  //
+                    << std::showbase << value << " "    //
+                    << std::noshowbase << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77 077 77")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
+                                                  value { literal: " " }
+                                                  value { str: "077" }
+                                                  value { literal: " " }
+                                                  value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::oct                         //
+            << std::noshowbase << value << " "  //
+            << std::showbase << value << " "    //
+            << std::noshowbase << value;
+}
+
+TEST(ManipulatorLogFormatTest, UppercaseHex) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 0xbeef;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream                        //
+      << std::hex                          //
+      << std::nouppercase << value << " "  //
+      << std::uppercase << value << " "    //
+      << std::nouppercase << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("0xbeef 0XBEEF 0xbeef")),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { str: "0xbeef" }
+                                  value { literal: " " }
+                                  value { str: "0XBEEF" }
+                                  value { literal: " " }
+                                  value { str: "0xbeef" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hex                          //
+            << std::nouppercase << value << " "  //
+            << std::uppercase << value << " "    //
+            << std::nouppercase << value;
+}
+
+TEST(ManipulatorLogFormatTest, FixedFloat) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 7.7e7;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::fixed << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77000000.000000")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "77000000.000000"
+                                                  })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::fixed << value;
+}
+
+TEST(ManipulatorLogFormatTest, ScientificFloat) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 7.7e7;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::scientific << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("7.700000e+07")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "7.700000e+07"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::scientific << value;
+}
+
+#if defined(__BIONIC__) && (!defined(__ANDROID_API__) || __ANDROID_API__ < 22)
+// Bionic doesn't support `%a` until API 22, so this prints 'a' even if the
+// C++ standard library implements hexfloat formatting correctly (by
+// forwarding to printf).
+#elif defined(__GLIBCXX__) && __cplusplus < 201402L
+// libstdc++ shipped C++11 support without `std::hexfloat`.
+#else
+TEST(ManipulatorLogFormatTest, FixedAndScientificFloat) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 7.7e7;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::setiosflags(std::ios_base::scientific |
+                                        std::ios_base::fixed)
+                    << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"),
+                                   Eq("0x1.25bb500000000p+26"))),
+                 ENCODED_MESSAGE(
+                     AnyOf(EqualsProto(R"pb(value { str: "0x1.25bb5p+26" })pb"),
+                           EqualsProto(R"pb(value {
+                                              str: "0x1.25bb500000000p+26"
+                                            })pb"))))));
+
+  test_sink.StartCapturingLogs();
+
+  // This combination should mean the same thing as `std::hexfloat`.
+  LOG(INFO) << std::setiosflags(std::ios_base::scientific |
+                                std::ios_base::fixed)
+            << value;
+}
+#endif
+
+#if defined(__BIONIC__) && (!defined(__ANDROID_API__) || __ANDROID_API__ < 22)
+// Bionic doesn't support `%a` until API 22, so this prints 'a' even if the C++
+// standard library supports `std::hexfloat` (by forwarding to printf).
+#elif defined(__GLIBCXX__) && __cplusplus < 201402L
+// libstdc++ shipped C++11 support without `std::hexfloat`.
+#else
+TEST(ManipulatorLogFormatTest, HexfloatFloat) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 7.7e7;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::hexfloat << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"),
+                                   Eq("0x1.25bb500000000p+26"))),
+                 ENCODED_MESSAGE(
+                     AnyOf(EqualsProto(R"pb(value { str: "0x1.25bb5p+26" })pb"),
+                           EqualsProto(R"pb(value {
+                                              str: "0x1.25bb500000000p+26"
+                                            })pb"))))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hexfloat << value;
+}
+#endif
+
+TEST(ManipulatorLogFormatTest, DefaultFloatFloat) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 7.7e7;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::hexfloat << std::defaultfloat << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("7.7e+07")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "7.7e+07"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hexfloat << std::defaultfloat << value;
+}
+
+TEST(ManipulatorLogFormatTest, Ends) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::ends;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq(absl::string_view("\0", 1))),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "\0" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::ends;
+}
+
+TEST(ManipulatorLogFormatTest, Endl) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::endl;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(MatchesOstream(comparison_stream)),
+          TextMessage(Eq("\n")),
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "\n" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::endl;
+}
+
+TEST(ManipulatorLogFormatTest, SetIosFlags) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 0x77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::resetiosflags(std::ios_base::basefield)
+                    << std::setiosflags(std::ios_base::hex) << value << " "  //
+                    << std::resetiosflags(std::ios_base::basefield)
+                    << std::setiosflags(std::ios_base::dec) << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(MatchesOstream(comparison_stream)),
+          TextMessage(Eq("0x77 119")),
+          // `std::setiosflags` and `std::resetiosflags` aren't manipulators.
+          // We're unable to distinguish their return type(s) from arbitrary
+          // user-defined types and thus don't suppress the empty str value.
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "0x77" }
+                                           value { literal: " " }
+                                           value { str: "119" }
+          )pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::resetiosflags(std::ios_base::basefield)
+            << std::setiosflags(std::ios_base::hex) << value << " "  //
+            << std::resetiosflags(std::ios_base::basefield)
+            << std::setiosflags(std::ios_base::dec) << value;
+}
+
+TEST(ManipulatorLogFormatTest, SetBase) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 0x77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::setbase(16) << value << " "  //
+                    << std::setbase(0) << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("0x77 119")),
+                 // `std::setbase` isn't a manipulator.  We're unable to
+                 // distinguish its return type from arbitrary user-defined
+                 // types and thus don't suppress the empty str value.
+                 ENCODED_MESSAGE(EqualsProto(
+                     R"pb(value { str: "0x77" }
+                          value { literal: " " }
+                          value { str: "119" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::setbase(16) << value << " "  //
+            << std::setbase(0) << value;
+}
+
+TEST(ManipulatorLogFormatTest, SetPrecision) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 6.022140857e23;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::setprecision(4) << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(MatchesOstream(comparison_stream)),
+          TextMessage(Eq("6.022e+23")),
+          // `std::setprecision` isn't a manipulator.  We're unable to
+          // distinguish its return type from arbitrary user-defined
+          // types and thus don't suppress the empty str value.
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "6.022e+23" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::setprecision(4) << value;
+}
+
+TEST(ManipulatorLogFormatTest, SetPrecisionOverflow) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const double value = 6.022140857e23;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::setprecision(200) << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("602214085700000015187968")),
+                 ENCODED_MESSAGE(EqualsProto(
+                     R"pb(value { str: "602214085700000015187968" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::setprecision(200) << value;
+}
+
+TEST(ManipulatorLogFormatTest, SetW) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::setw(8) << value;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(MatchesOstream(comparison_stream)),
+          TextMessage(Eq("      77")),
+          // `std::setw` isn't a manipulator.  We're unable to
+          // distinguish its return type from arbitrary user-defined
+          // types and thus don't suppress the empty str value.
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "      77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::setw(8) << value;
+}
+
+TEST(ManipulatorLogFormatTest, Left) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = -77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::left << std::setw(8) << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-77     ")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "-77     "
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::left << std::setw(8) << value;
+}
+
+TEST(ManipulatorLogFormatTest, Right) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = -77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::right << std::setw(8) << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("     -77")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "     -77"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::right << std::setw(8) << value;
+}
+
+TEST(ManipulatorLogFormatTest, Internal) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = -77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::internal << std::setw(8) << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-     77")),
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "-     77"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::internal << std::setw(8) << value;
+}
+
+TEST(ManipulatorLogFormatTest, SetFill) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  const int value = 77;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << std::setfill('0') << std::setw(8) << value;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("00000077")),
+                         // `std::setfill` isn't a manipulator.  We're
+                         // unable to distinguish its return
+                         // type from arbitrary user-defined types and
+                         // thus don't suppress the empty str value.
+                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                            str: "00000077"
+                                                          })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::setfill('0') << std::setw(8) << value;
+}
+
+class FromCustomClass {};
+std::ostream& operator<<(std::ostream& os, const FromCustomClass&) {
+  return os << "FromCustomClass{}" << std::hex;
+}
+
+TEST(ManipulatorLogFormatTest, FromCustom) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  FromCustomClass value;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value << " " << 0x77;
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("FromCustomClass{} 0x77")),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { str: "FromCustomClass{}" }
+                                  value { literal: " " }
+                                  value { str: "0x77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value << " " << 0x77;
+}
+
+class StreamsNothing {};
+std::ostream& operator<<(std::ostream& os, const StreamsNothing&) { return os; }
+
+TEST(ManipulatorLogFormatTest, CustomClassStreamsNothing) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  StreamsNothing value;
+  auto comparison_stream = ComparisonStream();
+  comparison_stream << value << 77;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("77")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << value << 77;
+}
+
+struct PointPercentV {
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, const PointPercentV& p) {
+    absl::Format(&sink, "(%v, %v)", p.x, p.y);
+  }
+
+  int x = 10;
+  int y = 20;
+};
+
+TEST(ManipulatorLogFormatTest, IOManipsDoNotAffectAbslStringify) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  PointPercentV p;
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::hex << p;
+}
+
+TEST(StructuredLoggingOverflowTest, TruncatesStrings) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  // This message is too long and should be truncated to some unspecified size
+  // no greater than the buffer size but not too much less either.  It should be
+  // truncated rather than discarded.
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(AllOf(
+              SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
+                           Le(absl::log_internal::kLogMessageBufferSize))),
+              Each(Eq('x')))),
+          ENCODED_MESSAGE(HasOneStrThat(AllOf(
+              SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
+                           Le(absl::log_internal::kLogMessageBufferSize))),
+              Each(Eq('x'))))))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x');
+}
+
+struct StringLike {
+  absl::string_view data;
+};
+std::ostream& operator<<(std::ostream& os, StringLike str) {
+  return os << str.data;
+}
+
+TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperators) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  // This message is too long and should be truncated to some unspecified size
+  // no greater than the buffer size but not too much less either.  It should be
+  // truncated rather than discarded.
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(AllOf(
+              SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
+                           Le(absl::log_internal::kLogMessageBufferSize))),
+              Each(Eq('x')))),
+          ENCODED_MESSAGE(HasOneStrThat(AllOf(
+              SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
+                           Le(absl::log_internal::kLogMessageBufferSize))),
+              Each(Eq('x'))))))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << StringLike{
+      std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x')};
+}
+
+// Returns the size of the largest string that will fit in a `LOG` message
+// buffer with no prefix.
+size_t MaxLogFieldLengthNoPrefix() {
+  class StringLengthExtractorSink : public absl::LogSink {
+   public:
+    void Send(const absl::LogEntry& entry) override {
+      CHECK(!size_.has_value());
+      CHECK_EQ(entry.text_message().find_first_not_of('x'),
+               absl::string_view::npos);
+      size_.emplace(entry.text_message().size());
+    }
+    size_t size() const {
+      CHECK(size_.has_value());
+      return *size_;
+    }
+
+   private:
+    absl::optional<size_t> size_;
+  } extractor_sink;
+  LOG(INFO).NoPrefix().ToSinkOnly(&extractor_sink)
+      << std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x');
+  return extractor_sink.size();
+}
+
+TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) {
+  const size_t longest_fit = MaxLogFieldLengthNoPrefix();
+  // To log a second value field, we need four bytes: two tag/type bytes and two
+  // sizes.  To put any data in the field we need a fifth byte.
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits exactly, no part of y fits.
+    LOG(INFO).NoPrefix() << std::string(longest_fit, 'x') << "y";
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 1), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, one byte from y's header fits but shouldn't be visible.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 1, 'x') << "y";
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 2), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, two bytes from y's header fit but shouldn't be visible.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 2, 'x') << "y";
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 3), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, three bytes from y's header fit but shouldn't be visible.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 3, 'x') << "y";
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrAndOneLiteralThat(
+                               AllOf(SizeIs(longest_fit - 4), Each(Eq('x'))),
+                               IsEmpty())),
+                           RawEncodedMessage(Not(AsString(EndsWith("x")))))));
+    test_sink.StartCapturingLogs();
+    // x fits, all four bytes from y's header fit but no data bytes do, so we
+    // encode an empty string.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 4, 'x') << "y";
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(
+        test_sink,
+        Send(AllOf(ENCODED_MESSAGE(HasOneStrAndOneLiteralThat(
+                       AllOf(SizeIs(longest_fit - 5), Each(Eq('x'))), Eq("y"))),
+                   RawEncodedMessage(AsString(EndsWith("y"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, y fits exactly.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 5, 'x') << "y";
+  }
+}
+
+TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) {
+  const size_t longest_fit = MaxLogFieldLengthNoPrefix();
+  // To log a second value field, we need four bytes: two tag/type bytes and two
+  // sizes.  To put any data in the field we need a fifth byte.
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits exactly, no part of y fits.
+    LOG(INFO).NoPrefix() << std::string(longest_fit, 'x') << StringLike{"y"};
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 1), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, one byte from y's header fits but shouldn't be visible.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 1, 'x')
+                         << StringLike{"y"};
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 2), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, two bytes from y's header fit but shouldn't be visible.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 2, 'x')
+                         << StringLike{"y"};
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 3), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, three bytes from y's header fit but shouldn't be visible.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 3, 'x')
+                         << StringLike{"y"};
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(test_sink,
+                Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
+                               AllOf(SizeIs(longest_fit - 4), Each(Eq('x'))))),
+                           RawEncodedMessage(AsString(EndsWith("x"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, all four bytes from y's header fit but no data bytes do.  We
+    // don't encode an empty string here because every I/O manipulator hits this
+    // codepath and those shouldn't leave empty strings behind.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 4, 'x')
+                         << StringLike{"y"};
+  }
+  {
+    absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+    EXPECT_CALL(
+        test_sink,
+        Send(AllOf(ENCODED_MESSAGE(HasTwoStrsThat(
+                       AllOf(SizeIs(longest_fit - 5), Each(Eq('x'))), Eq("y"))),
+                   RawEncodedMessage(AsString(EndsWith("y"))))));
+    test_sink.StartCapturingLogs();
+    // x fits, y fits exactly.
+    LOG(INFO).NoPrefix() << std::string(longest_fit - 5, 'x')
+                         << StringLike{"y"};
+  }
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/log_macro_hygiene_test.cc b/abseil-cpp/absl/log/log_macro_hygiene_test.cc
new file mode 100644
index 0000000..dad9389
--- /dev/null
+++ b/abseil-cpp/absl/log/log_macro_hygiene_test.cc
@@ -0,0 +1,187 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+
+namespace {
+using ::testing::_;
+using ::testing::Eq;
+
+namespace not_absl {
+
+class Dummy {
+ public:
+  Dummy() {}
+
+ private:
+  Dummy(const Dummy&) = delete;
+  Dummy& operator=(const Dummy&) = delete;
+};
+
+// This line tests that local definitions of INFO, WARNING, ERROR,
+// etc. don't shadow the global ones used by the logging macros.  If
+// they do, the LOG() calls in the tests won't compile, catching the
+// bug.
+const Dummy INFO, WARNING, ERROR, FATAL, NUM_SEVERITIES;
+
+// These make sure that uses of same-named types in the
+// implementation of the logging macros are fully qualified.
+class string {};
+class vector {};
+class LogMessage {};
+class LogMessageFatal {};
+class LogMessageQuietlyFatal {};
+class LogMessageVoidify {};
+class LogSink {};
+class NullStream {};
+class NullStreamFatal {};
+
+}  // namespace not_absl
+
+using namespace not_absl;  // NOLINT
+
+// Tests for LOG(LEVEL()).
+
+TEST(LogHygieneTest, WorksForQualifiedSeverity) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  ::testing::InSequence seq;
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "To INFO"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kWarning, _, "To WARNING"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kError, _, "To ERROR"));
+
+  test_sink.StartCapturingLogs();
+  // Note that LOG(LEVEL()) expects the severity as a run-time
+  // expression (as opposed to a compile-time constant), so the test
+  // verifies that fully qualified severities such as
+  // absl::LogSeverity::kInfo are accepted.
+  LOG(LEVEL(absl::LogSeverity::kInfo)) << "To INFO";
+  LOG(LEVEL(absl::LogSeverity::kWarning)) << "To WARNING";
+  LOG(LEVEL(absl::LogSeverity::kError)) << "To ERROR";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeINFOSymbol) {
+  const double INFO ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << "Hello world";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeWARNINGSymbol) {
+  const double WARNING ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kWarning, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(WARNING) << "Hello world";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeERRORSymbol) {
+  const double ERROR ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kError, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(ERROR) << "Hello world";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeLEVELSymbol) {
+  const double LEVEL ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kError, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(LEVEL(absl::LogSeverity::kError)) << "Hello world";
+}
+
+#define INFO Bogus
+#ifdef NDEBUG
+constexpr bool IsOptimized = false;
+#else
+constexpr bool IsOptimized = true;
+#endif
+
+TEST(LogHygieneTest, WorksWithINFODefined) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "Hello world"))
+      .Times(2 + (IsOptimized ? 2 : 0));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << "Hello world";
+  LOG_IF(INFO, true) << "Hello world";
+
+  DLOG(INFO) << "Hello world";
+  DLOG_IF(INFO, true) << "Hello world";
+}
+
+#undef INFO
+
+#define _INFO Bogus
+TEST(LogHygieneTest, WorksWithUnderscoreINFODefined) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "Hello world"))
+      .Times(2 + (IsOptimized ? 2 : 0));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << "Hello world";
+  LOG_IF(INFO, true) << "Hello world";
+
+  DLOG(INFO) << "Hello world";
+  DLOG_IF(INFO, true) << "Hello world";
+}
+#undef _INFO
+
+TEST(LogHygieneTest, ExpressionEvaluationInLEVELSeverity) {
+  auto i = static_cast<int>(absl::LogSeverity::kInfo);
+  LOG(LEVEL(++i)) << "hello world";  // NOLINT
+  EXPECT_THAT(i, Eq(static_cast<int>(absl::LogSeverity::kInfo) + 1));
+}
+
+TEST(LogHygieneTest, ExpressionEvaluationInStreamedMessage) {
+  int i = 0;
+  LOG(INFO) << ++i;
+  EXPECT_THAT(i, 1);
+  LOG_IF(INFO, false) << ++i;
+  EXPECT_THAT(i, 1);
+}
+
+// Tests that macros are usable in unbraced switch statements.
+// -----------------------------------------------------------
+
+class UnbracedSwitchCompileTest {
+  static void Log() {
+    switch (0) {
+      case 0:
+        LOG(INFO);
+        break;
+      default:
+        break;
+    }
+  }
+};
+
+}  // namespace
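
The hygiene tests above depend on `LOG(LEVEL(...))` accepting an arbitrary run-time severity expression. A minimal usage sketch, not part of the patch (the function and its argument are invented for illustration):

#include "absl/base/log_severity.h"
#include "absl/log/log.h"

// Illustrative only: picks the severity at run time.  As the
// ExpressionEvaluationInLEVELSeverity test shows, LEVEL() evaluates its
// argument exactly once.
void ReportParseResult(bool ok) {
  const absl::LogSeverity severity =
      ok ? absl::LogSeverity::kInfo : absl::LogSeverity::kError;
  LOG(LEVEL(severity)) << "parse " << (ok ? "succeeded" : "failed");
}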
diff --git a/abseil-cpp/absl/log/log_modifier_methods_test.cc b/abseil-cpp/absl/log/log_modifier_methods_test.cc
new file mode 100644
index 0000000..42e13b1
--- /dev/null
+++ b/abseil-cpp/absl/log/log_modifier_methods_test.cc
@@ -0,0 +1,233 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <errno.h>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/log/internal/test_actions.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/log_sink.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+
+namespace {
+#if GTEST_HAS_DEATH_TEST
+using ::absl::log_internal::DeathTestExpectedLogging;
+using ::absl::log_internal::DeathTestUnexpectedLogging;
+using ::absl::log_internal::DeathTestValidateExpectations;
+using ::absl::log_internal::DiedOfQFatal;
+#endif
+using ::absl::log_internal::LogSeverity;
+using ::absl::log_internal::Prefix;
+using ::absl::log_internal::SourceBasename;
+using ::absl::log_internal::SourceFilename;
+using ::absl::log_internal::SourceLine;
+using ::absl::log_internal::Stacktrace;
+using ::absl::log_internal::TextMessage;
+using ::absl::log_internal::TextMessageWithPrefix;
+using ::absl::log_internal::TextMessageWithPrefixAndNewline;
+using ::absl::log_internal::TextPrefix;
+using ::absl::log_internal::ThreadID;
+using ::absl::log_internal::Timestamp;
+using ::absl::log_internal::Verbosity;
+
+using ::testing::AllOf;
+using ::testing::AnyNumber;
+using ::testing::AnyOf;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::IsFalse;
+using ::testing::Truly;
+
+TEST(TailCallsModifiesTest, AtLocationFileLine) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          // The metadata should change:
+          SourceFilename(Eq("/my/very/very/very_long_source_file.cc")),
+          SourceBasename(Eq("very_long_source_file.cc")), SourceLine(Eq(777)),
+          // The logged line should change too, even though the prefix must
+          // grow to fit the new metadata.
+          TextMessageWithPrefix(Truly([](absl::string_view msg) {
+            return absl::EndsWith(msg,
+                                  " very_long_source_file.cc:777] hello world");
+          })))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).AtLocation("/my/very/very/very_long_source_file.cc", 777)
+      << "hello world";
+}
+
+TEST(TailCallsModifiesTest, NoPrefix) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(AllOf(Prefix(IsFalse()), TextPrefix(IsEmpty()),
+                                    TextMessageWithPrefix(Eq("hello world")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).NoPrefix() << "hello world";
+}
+
+TEST(TailCallsModifiesTest, NoPrefixNoMessageNoShirtNoShoesNoService) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(Prefix(IsFalse()), TextPrefix(IsEmpty()),
+                         TextMessageWithPrefix(IsEmpty()),
+                         TextMessageWithPrefixAndNewline(Eq("\n")))));
+  test_sink.StartCapturingLogs();
+  LOG(INFO).NoPrefix();
+}
+
+TEST(TailCallsModifiesTest, WithVerbosity) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(Verbosity(Eq(2))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).WithVerbosity(2) << "hello world";
+}
+
+TEST(TailCallsModifiesTest, WithVerbosityNoVerbosity) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink,
+              Send(Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).WithVerbosity(2).WithVerbosity(absl::LogEntry::kNoVerbosityLevel)
+      << "hello world";
+}
+
+TEST(TailCallsModifiesTest, WithTimestamp) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(Timestamp(Eq(absl::UnixEpoch()))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).WithTimestamp(absl::UnixEpoch()) << "hello world";
+}
+
+TEST(TailCallsModifiesTest, WithThreadID) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink,
+              Send(AllOf(ThreadID(Eq(absl::LogEntry::tid_t{1234})))));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).WithThreadID(1234) << "hello world";
+}
+
+TEST(TailCallsModifiesTest, WithMetadataFrom) {
+  class ForwardingLogSink : public absl::LogSink {
+   public:
+    void Send(const absl::LogEntry &entry) override {
+      LOG(LEVEL(entry.log_severity())).WithMetadataFrom(entry)
+          << "forwarded: " << entry.text_message();
+    }
+  } forwarding_sink;
+
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("fake/file")), SourceBasename(Eq("file")),
+                 SourceLine(Eq(123)), Prefix(IsFalse()),
+                 LogSeverity(Eq(absl::LogSeverity::kWarning)),
+                 Timestamp(Eq(absl::UnixEpoch())),
+                 ThreadID(Eq(absl::LogEntry::tid_t{456})),
+                 TextMessage(Eq("forwarded: hello world")), Verbosity(Eq(7)),
+                 ENCODED_MESSAGE(
+                     EqualsProto(R"pb(value { literal: "forwarded: " }
+                                      value { str: "hello world" })pb")))));
+
+  test_sink.StartCapturingLogs();
+  LOG(WARNING)
+          .AtLocation("fake/file", 123)
+          .NoPrefix()
+          .WithTimestamp(absl::UnixEpoch())
+          .WithThreadID(456)
+          .WithVerbosity(7)
+          .ToSinkOnly(&forwarding_sink)
+      << "hello world";
+}
+
+TEST(TailCallsModifiesTest, WithPerror) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(AnyOf(Eq("hello world: Bad file number [9]"),
+                                   Eq("hello world: Bad file descriptor [9]"),
+                                   Eq("hello world: Bad file descriptor [8]"))),
+                 ENCODED_MESSAGE(
+                     AnyOf(EqualsProto(R"pb(value { literal: "hello world" }
+                                            value { literal: ": " }
+                                            value { str: "Bad file number" }
+                                            value { literal: " [" }
+                                            value { str: "9" }
+                                            value { literal: "]" })pb"),
+                           EqualsProto(R"pb(value { literal: "hello world" }
+                                            value { literal: ": " }
+                                            value { str: "Bad file descriptor" }
+                                            value { literal: " [" }
+                                            value { str: "9" }
+                                            value { literal: "]" })pb"),
+                           EqualsProto(R"pb(value { literal: "hello world" }
+                                            value { literal: ": " }
+                                            value { str: "Bad file descriptor" }
+                                            value { literal: " [" }
+                                            value { str: "8" }
+                                            value { literal: "]" })pb"))))));
+
+  test_sink.StartCapturingLogs();
+  errno = EBADF;
+  LOG(INFO).WithPerror() << "hello world";
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(ModifierMethodDeathTest, ToSinkOnlyQFatal) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink(
+            absl::MockLogDefault::kDisallowUnexpected);
+
+        auto do_log = [&test_sink] {
+          LOG(QFATAL).ToSinkOnly(&test_sink.UseAsLocalSink()) << "hello world";
+        };
+
+        EXPECT_CALL(test_sink, Send)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("hello world")),
+                                          Stacktrace(IsEmpty()))))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        do_log();
+      },
+      DiedOfQFatal, DeathTestValidateExpectations());
+}
+#endif
+
+}  // namespace
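
The modifier methods exercised in this file chain on a single statement. A minimal sketch under the same API, not part of the patch (the function and its `audit_sink` parameter are invented for illustration):

#include "absl/log/log.h"
#include "absl/log/log_sink.h"
#include "absl/time/time.h"

// Illustrative only: every modifier below also appears in the tests above.
void LogAuditRecord(absl::LogSink* audit_sink) {
  LOG(WARNING)
          .AtLocation("audit/source.cc", 42)  // override the recorded location
          .NoPrefix()                         // drop the formatted prefix
          .WithTimestamp(absl::UnixEpoch())   // override the timestamp
          .WithVerbosity(7)                   // attach a verbosity level
          .ToSinkAlso(audit_sink)             // also deliver to this sink
      << "audit event";
}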
diff --git a/abseil-cpp/absl/debugging/leak_check_disable.cc b/abseil-cpp/absl/log/log_sink.cc
similarity index 61%
copy from abseil-cpp/absl/debugging/leak_check_disable.cc
copy to abseil-cpp/absl/log/log_sink.cc
index 924d6e3..01d7ca8 100644
--- a/abseil-cpp/absl/debugging/leak_check_disable.cc
+++ b/abseil-cpp/absl/log/log_sink.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2022 The Abseil Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//      https://www.apache.org/licenses/LICENSE-2.0
+//     https://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,9 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Disable LeakSanitizer when this file is linked in.
-// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
-extern "C" int __lsan_is_turned_off();
-extern "C" int __lsan_is_turned_off() {
-  return 1;
-}
+#include "absl/log/log_sink.h"
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+void LogSink::KeyFunction() const {}
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/log_sink.h b/abseil-cpp/absl/log/log_sink.h
new file mode 100644
index 0000000..9bfa6f8
--- /dev/null
+++ b/abseil-cpp/absl/log/log_sink.h
@@ -0,0 +1,64 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/log_sink.h
+// -----------------------------------------------------------------------------
+//
+// This header declares the interface class `absl::LogSink`.
+
+#ifndef ABSL_LOG_LOG_SINK_H_
+#define ABSL_LOG_LOG_SINK_H_
+
+#include "absl/base/config.h"
+#include "absl/log/log_entry.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::LogSink
+//
+// `absl::LogSink` is an interface which can be extended to intercept and
+// process particular messages (with `LOG.ToSinkOnly()` or
+// `LOG.ToSinkAlso()`) or all messages (if registered with
+// `absl::AddLogSink`).  Implementations must be thread-safe, and should take
+// care not to take any locks that might be held by the `LOG` caller.
+class LogSink {
+ public:
+  virtual ~LogSink() = default;
+
+  // LogSink::Send()
+  //
+  // `Send` is called synchronously during the log statement.
+  //
+  // It is safe to use `LOG` within an implementation of `Send`.  `ToSinkOnly`
+  // and `ToSinkAlso` are safe in general but can be used to create an infinite
+  // loop if you try.
+  virtual void Send(const absl::LogEntry& entry) = 0;
+
+  // LogSink::Flush()
+  //
+  // Sinks that buffer messages should override this method to flush the buffer
+  // and return.
+  virtual void Flush() {}
+
+ private:
+  // https://lld.llvm.org/missingkeyfunction.html#missing-key-function
+  virtual void KeyFunction() const final;  // NOLINT(readability/inheritance)
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_LOG_SINK_H_
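
As the comments in this header describe, a sink only needs to override `Send()` (and `Flush()` if it buffers). A minimal sketch of a conforming implementation, assuming nothing beyond this interface and the `absl::LogEntry` accessors used elsewhere in this change; the class and function names are invented:

#include <iostream>

#include "absl/log/log.h"
#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"

// Illustrative only: writes each message's unprefixed text to stderr.
// Send() runs synchronously on the thread that executed the LOG statement.
class StderrEchoSink : public absl::LogSink {
 public:
  void Send(const absl::LogEntry& entry) override {
    std::cerr << entry.text_message() << "\n";
  }
};

void EchoOneMessage() {
  StderrEchoSink sink;
  // Delivers this one statement to `sink` in addition to any registered sinks.
  LOG(INFO).ToSinkAlso(&sink) << "hello from StderrEchoSink";
}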
diff --git a/abseil-cpp/absl/log/log_sink_registry.h b/abseil-cpp/absl/log/log_sink_registry.h
new file mode 100644
index 0000000..bf76cce
--- /dev/null
+++ b/abseil-cpp/absl/log/log_sink_registry.h
@@ -0,0 +1,61 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/log_sink_registry.h
+// -----------------------------------------------------------------------------
+//
+// This header declares APIs to operate on the global set of registered log
+// sinks.
+
+#ifndef ABSL_LOG_LOG_SINK_REGISTRY_H_
+#define ABSL_LOG_LOG_SINK_REGISTRY_H_
+
+#include "absl/base/config.h"
+#include "absl/log/internal/log_sink_set.h"
+#include "absl/log/log_sink.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// AddLogSink(), RemoveLogSink()
+//
+// Adds or removes an `absl::LogSink` as a consumer of logging data.
+//
+// These functions are thread-safe.
+//
+// It is an error to attempt to add a sink that's already registered or to
+// attempt to remove one that isn't.
+//
+// To avoid unbounded recursion, dispatch to registered `absl::LogSink`s is
+// disabled per-thread while a registered sink's `Send()` method is running.
+// Affected messages are dispatched to a special internal sink instead, which
+// writes them to `stderr`.
+//
+// Do not call these inside `absl::LogSink::Send`.
+inline void AddLogSink(absl::LogSink* sink) { log_internal::AddLogSink(sink); }
+inline void RemoveLogSink(absl::LogSink* sink) {
+  log_internal::RemoveLogSink(sink);
+}
+
+// FlushLogSinks()
+//
+// Calls `absl::LogSink::Flush` on all registered sinks.
+//
+// Do not call this inside `absl::LogSink::Send`.
+inline void FlushLogSinks() { log_internal::FlushLogSinks(); }
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_LOG_SINK_REGISTRY_H_
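
For global capture, each `AddLogSink` call must be paired with a matching `RemoveLogSink` before the sink is destroyed. A minimal usage sketch, not part of the patch (the function name is invented; the sink may be any `absl::LogSink`):

#include "absl/log/log.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"

// Illustrative only: `sink` must outlive its registration.
void LogWhileRegistered(absl::LogSink* sink) {
  absl::AddLogSink(sink);     // every subsequent LOG also reaches `sink`
  LOG(INFO) << "seen by all registered sinks";
  absl::FlushLogSinks();      // asks buffering sinks to flush
  absl::RemoveLogSink(sink);  // registering twice or removing an unregistered
                              // sink is an error, per the comments above
}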
diff --git a/abseil-cpp/absl/log/log_sink_test.cc b/abseil-cpp/absl/log/log_sink_test.cc
new file mode 100644
index 0000000..fa74306
--- /dev/null
+++ b/abseil-cpp/absl/log/log_sink_test.cc
@@ -0,0 +1,418 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/log_sink.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/log/internal/test_actions.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/log_sink_registry.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/string_view.h"
+
+namespace {
+
+using ::absl::log_internal::DeathTestExpectedLogging;
+using ::absl::log_internal::DeathTestUnexpectedLogging;
+using ::absl::log_internal::DeathTestValidateExpectations;
+using ::absl::log_internal::DiedOfFatal;
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+// Tests for global log sink registration.
+// ---------------------------------------
+
+TEST(LogSinkRegistryTest, AddLogSink) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  InSequence s;
+  EXPECT_CALL(test_sink, Log(_, _, "hello world")).Times(0);
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, __FILE__, "Test : 42"));
+  EXPECT_CALL(test_sink,
+              Log(absl::LogSeverity::kWarning, __FILE__, "Danger ahead"));
+  EXPECT_CALL(test_sink,
+              Log(absl::LogSeverity::kError, __FILE__, "This is an error"));
+
+  LOG(INFO) << "hello world";
+  test_sink.StartCapturingLogs();
+
+  LOG(INFO) << "Test : " << 42;
+  LOG(WARNING) << "Danger" << ' ' << "ahead";
+  LOG(ERROR) << "This is an error";
+
+  test_sink.StopCapturingLogs();
+  LOG(INFO) << "Goodby world";
+}
+
+TEST(LogSinkRegistryTest, MultipleLogSinks) {
+  absl::ScopedMockLog test_sink1(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog test_sink2(absl::MockLogDefault::kDisallowUnexpected);
+
+  ::testing::InSequence seq;
+  EXPECT_CALL(test_sink1, Log(absl::LogSeverity::kInfo, _, "First")).Times(1);
+  EXPECT_CALL(test_sink2, Log(absl::LogSeverity::kInfo, _, "First")).Times(0);
+
+  EXPECT_CALL(test_sink1, Log(absl::LogSeverity::kInfo, _, "Second")).Times(1);
+  EXPECT_CALL(test_sink2, Log(absl::LogSeverity::kInfo, _, "Second")).Times(1);
+
+  EXPECT_CALL(test_sink1, Log(absl::LogSeverity::kInfo, _, "Third")).Times(0);
+  EXPECT_CALL(test_sink2, Log(absl::LogSeverity::kInfo, _, "Third")).Times(1);
+
+  LOG(INFO) << "Before first";
+
+  test_sink1.StartCapturingLogs();
+  LOG(INFO) << "First";
+
+  test_sink2.StartCapturingLogs();
+  LOG(INFO) << "Second";
+
+  test_sink1.StopCapturingLogs();
+  LOG(INFO) << "Third";
+
+  test_sink2.StopCapturingLogs();
+  LOG(INFO) << "Fourth";
+}
+
+TEST(LogSinkRegistrationDeathTest, DuplicateSinkRegistration) {
+  ASSERT_DEATH_IF_SUPPORTED(
+      {
+        absl::ScopedMockLog sink;
+        sink.StartCapturingLogs();
+        absl::AddLogSink(&sink.UseAsLocalSink());
+      },
+      HasSubstr("Duplicate log sinks"));
+}
+
+TEST(LogSinkRegistrationDeathTest, MismatchSinkRemoval) {
+  ASSERT_DEATH_IF_SUPPORTED(
+      {
+        absl::ScopedMockLog sink;
+        absl::RemoveLogSink(&sink.UseAsLocalSink());
+      },
+      HasSubstr("Mismatched log sink"));
+}
+
+// Tests for log sink semantics.
+// ---------------------------------------
+
+TEST(LogSinkTest, FlushSinks) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Flush()).Times(2);
+
+  test_sink.StartCapturingLogs();
+
+  absl::FlushLogSinks();
+  absl::FlushLogSinks();
+}
+
+TEST(LogSinkDeathTest, DeathInSend) {
+  class FatalSendSink : public absl::LogSink {
+   public:
+    void Send(const absl::LogEntry&) override { LOG(FATAL) << "goodbye world"; }
+  };
+
+  FatalSendSink sink;
+  EXPECT_EXIT({ LOG(INFO).ToSinkAlso(&sink) << "hello world"; }, DiedOfFatal,
+              _);
+}
+
+// Tests for explicit log sink redirection.
+// ---------------------------------------
+
+TEST(LogSinkTest, ToSinkAlso) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog another_sink(absl::MockLogDefault::kDisallowUnexpected);
+  EXPECT_CALL(test_sink, Log(_, _, "hello world"));
+  EXPECT_CALL(another_sink, Log(_, _, "hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).ToSinkAlso(&another_sink.UseAsLocalSink()) << "hello world";
+}
+
+TEST(LogSinkTest, ToSinkOnly) {
+  absl::ScopedMockLog another_sink(absl::MockLogDefault::kDisallowUnexpected);
+  EXPECT_CALL(another_sink, Log(_, _, "hello world"));
+  LOG(INFO).ToSinkOnly(&another_sink.UseAsLocalSink()) << "hello world";
+}
+
+TEST(LogSinkTest, ToManySinks) {
+  absl::ScopedMockLog sink1(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink2(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink3(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink4(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink5(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(sink3, Log(_, _, "hello world"));
+  EXPECT_CALL(sink4, Log(_, _, "hello world"));
+  EXPECT_CALL(sink5, Log(_, _, "hello world"));
+
+  LOG(INFO)
+          .ToSinkAlso(&sink1.UseAsLocalSink())
+          .ToSinkAlso(&sink2.UseAsLocalSink())
+          .ToSinkOnly(&sink3.UseAsLocalSink())
+          .ToSinkAlso(&sink4.UseAsLocalSink())
+          .ToSinkAlso(&sink5.UseAsLocalSink())
+      << "hello world";
+}
+
+class ReentrancyTest : public ::testing::Test {
+ protected:
+  ReentrancyTest() = default;
+  enum class LogMode : int { kNormal, kToSinkAlso, kToSinkOnly };
+
+  class ReentrantSendLogSink : public absl::LogSink {
+   public:
+    explicit ReentrantSendLogSink(absl::LogSeverity severity,
+                                  absl::LogSink* sink, LogMode mode)
+        : severity_(severity), sink_(sink), mode_(mode) {}
+    explicit ReentrantSendLogSink(absl::LogSeverity severity)
+        : ReentrantSendLogSink(severity, nullptr, LogMode::kNormal) {}
+
+    void Send(const absl::LogEntry&) override {
+      switch (mode_) {
+        case LogMode::kNormal:
+          LOG(LEVEL(severity_)) << "The log is coming from *inside the sink*.";
+          break;
+        case LogMode::kToSinkAlso:
+          LOG(LEVEL(severity_)).ToSinkAlso(sink_)
+              << "The log is coming from *inside the sink*.";
+          break;
+        case LogMode::kToSinkOnly:
+          LOG(LEVEL(severity_)).ToSinkOnly(sink_)
+              << "The log is coming from *inside the sink*.";
+          break;
+        default:
+          LOG(FATAL) << "Invalid mode " << static_cast<int>(mode_);
+      }
+    }
+
+   private:
+    absl::LogSeverity severity_;
+    absl::LogSink* sink_;
+    LogMode mode_;
+  };
+
+  static absl::string_view LogAndReturn(absl::LogSeverity severity,
+                                        absl::string_view to_log,
+                                        absl::string_view to_return) {
+    LOG(LEVEL(severity)) << to_log;
+    return to_return;
+  }
+};
+
+TEST_F(ReentrancyTest, LogFunctionThatLogs) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  InSequence seq;
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "hello"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "world"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kWarning, _, "danger"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "here"));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << LogAndReturn(absl::LogSeverity::kInfo, "hello", "world");
+  LOG(INFO) << LogAndReturn(absl::LogSeverity::kWarning, "danger", "here");
+}
+
+TEST_F(ReentrancyTest, RegisteredLogSinkThatLogsInSend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo);
+  EXPECT_CALL(test_sink, Log(_, _, "hello world"));
+
+  test_sink.StartCapturingLogs();
+  absl::AddLogSink(&reentrant_sink);
+  LOG(INFO) << "hello world";
+  absl::RemoveLogSink(&reentrant_sink);
+}
+
+TEST_F(ReentrancyTest, AlsoLogSinkThatLogsInSend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo);
+  EXPECT_CALL(test_sink, Log(_, _, "hello world"));
+  EXPECT_CALL(test_sink,
+              Log(_, _, "The log is coming from *inside the sink*."));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world";
+}
+
+TEST_F(ReentrancyTest, RegisteredAlsoLogSinkThatLogsInSend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo);
+  EXPECT_CALL(test_sink, Log(_, _, "hello world"));
+  // We only call into test_sink once with this message: by the time the
+  // second log statement runs we are in "ThreadIsLogging" mode, and all log
+  // statements are redirected to stderr.
+  EXPECT_CALL(test_sink,
+              Log(_, _, "The log is coming from *inside the sink*."));
+
+  test_sink.StartCapturingLogs();
+  absl::AddLogSink(&reentrant_sink);
+  LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world";
+  absl::RemoveLogSink(&reentrant_sink);
+}
+
+TEST_F(ReentrancyTest, OnlyLogSinkThatLogsInSend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo);
+  EXPECT_CALL(test_sink,
+              Log(_, _, "The log is coming from *inside the sink*."));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world";
+}
+
+TEST_F(ReentrancyTest, RegisteredOnlyLogSinkThatLogsInSend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo);
+  EXPECT_CALL(test_sink,
+              Log(_, _, "The log is coming from *inside the sink*."));
+
+  test_sink.StartCapturingLogs();
+  absl::AddLogSink(&reentrant_sink);
+  LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world";
+  absl::RemoveLogSink(&reentrant_sink);
+}
+
+using ReentrancyDeathTest = ReentrancyTest;
+
+TEST_F(ReentrancyDeathTest, LogFunctionThatLogsFatal) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+        EXPECT_CALL(test_sink, Log(_, _, "hello"))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        LOG(INFO) << LogAndReturn(absl::LogSeverity::kFatal, "hello", "world");
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+TEST_F(ReentrancyDeathTest, RegisteredLogSinkThatLogsFatalInSend) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+        ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal);
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+        EXPECT_CALL(test_sink, Log(_, _, "hello world"))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        absl::AddLogSink(&reentrant_sink);
+        LOG(INFO) << "hello world";
+        // No need to call RemoveLogSink - process is dead at this point.
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+TEST_F(ReentrancyDeathTest, AlsoLogSinkThatLogsFatalInSend) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+        ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal);
+
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+        EXPECT_CALL(test_sink, Log(_, _, "hello world"))
+            .WillOnce(DeathTestExpectedLogging());
+        EXPECT_CALL(test_sink,
+                    Log(_, _, "The log is coming from *inside the sink*."))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world";
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+TEST_F(ReentrancyDeathTest, RegisteredAlsoLogSinkThatLogsFatalInSend) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+        ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal);
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+        EXPECT_CALL(test_sink, Log(_, _, "hello world"))
+            .WillOnce(DeathTestExpectedLogging());
+        EXPECT_CALL(test_sink,
+                    Log(_, _, "The log is coming from *inside the sink*."))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        absl::AddLogSink(&reentrant_sink);
+        LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world";
+        // No need to call RemoveLogSink - process is dead at this point.
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+TEST_F(ReentrancyDeathTest, OnlyLogSinkThatLogsFatalInSend) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+        ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal);
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+        EXPECT_CALL(test_sink,
+                    Log(_, _, "The log is coming from *inside the sink*."))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world";
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+TEST_F(ReentrancyDeathTest, RegisteredOnlyLogSinkThatLogsFatalInSend) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+        ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal);
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+        EXPECT_CALL(test_sink,
+                    Log(_, _, "The log is coming from *inside the sink*."))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        absl::AddLogSink(&reentrant_sink);
+        LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world";
+        // No need to call RemoveLogSink - process is dead at this point.
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/log_streamer.h b/abseil-cpp/absl/log/log_streamer.h
new file mode 100644
index 0000000..2d41a07
--- /dev/null
+++ b/abseil-cpp/absl/log/log_streamer.h
@@ -0,0 +1,171 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/log_streamer.h
+// -----------------------------------------------------------------------------
+//
+// This header declares the class `LogStreamer` and convenience functions to
+// construct LogStreamer objects with different associated log severity levels.
+
+#ifndef ABSL_LOG_LOG_STREAMER_H_
+#define ABSL_LOG_LOG_STREAMER_H_
+
+#include <ios>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/absl_log.h"
+#include "absl/strings/internal/ostringstream.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// LogStreamer
+//
+// Although you can stream into `LOG(INFO)`, you can't pass it into a function
+// that takes a `std::ostream` parameter. `LogStreamer::stream()` provides a
+// `std::ostream` that buffers everything that's streamed in.  The buffer's
+// contents are logged as if by `LOG` when the `LogStreamer` is destroyed.
+// If nothing is streamed in, an empty message is logged.  If the specified
+// severity is `absl::LogSeverity::kFatal`, the program will be terminated when
+// the `LogStreamer` is destroyed regardless of whether any data were streamed
+// in.
+//
+// Factory functions corresponding to the `absl::LogSeverity` enumerators
+// are provided for convenience; if the desired severity is variable, invoke the
+// constructor directly.
+//
+// LogStreamer is movable, but not copyable.
+//
+// Examples:
+//
+//   ShaveYakAndWriteToStream(
+//       yak, absl::LogInfoStreamer(__FILE__, __LINE__).stream());
+//
+//   {
+//     // This logs a single line containing data streamed by all three function
+//     // calls.
+//     absl::LogStreamer streamer(absl::LogSeverity::kInfo, __FILE__, __LINE__);
+//     ShaveYakAndWriteToStream(yak1, streamer.stream());
+//     streamer.stream() << " ";
+//     ShaveYakAndWriteToStream(yak2, streamer.stream());
+//     streamer.stream() << " ";
+//     ShaveYakAndWriteToStreamPointer(yak3, &streamer.stream());
+//   }
+class LogStreamer final {
+ public:
+  // LogStreamer::LogStreamer()
+  //
+  // Creates a LogStreamer with a given `severity` that will log a message
+  // attributed to the given `file` and `line`.
+  explicit LogStreamer(absl::LogSeverity severity, absl::string_view file,
+                       int line)
+      : severity_(severity),
+        line_(line),
+        file_(file),
+        stream_(absl::in_place, &buf_) {
+    // To match `LOG`'s defaults:
+    stream_->setf(std::ios_base::showbase | std::ios_base::boolalpha);
+  }
+
+  // A moved-from `absl::LogStreamer` does not `LOG` when destroyed,
+  // and a program that streams into one has undefined behavior.
+  LogStreamer(LogStreamer&& that) noexcept
+      : severity_(that.severity_),
+        line_(that.line_),
+        file_(std::move(that.file_)),
+        buf_(std::move(that.buf_)),
+        stream_(std::move(that.stream_)) {
+    if (stream_.has_value()) stream_->str(&buf_);
+    that.stream_.reset();
+  }
+  LogStreamer& operator=(LogStreamer&& that) {
+    ABSL_LOG_IF(LEVEL(severity_), stream_).AtLocation(file_, line_) << buf_;
+    severity_ = that.severity_;
+    file_ = std::move(that.file_);
+    line_ = that.line_;
+    buf_ = std::move(that.buf_);
+    stream_ = std::move(that.stream_);
+    if (stream_.has_value()) stream_->str(&buf_);
+    that.stream_.reset();
+    return *this;
+  }
+
+  // LogStreamer::~LogStreamer()
+  //
+  // Logs this LogStreamer's buffered content as if by LOG.
+  ~LogStreamer() {
+    ABSL_LOG_IF(LEVEL(severity_), stream_.has_value()).AtLocation(file_, line_)
+        << buf_;
+  }
+
+  // LogStreamer::stream()
+  //
+  // Returns the `std::ostream` to use to write into this LogStreamer's
+  // internal buffer.
+  std::ostream& stream() { return *stream_; }
+
+ private:
+  absl::LogSeverity severity_;
+  int line_;
+  std::string file_;
+  std::string buf_;
+  // A disengaged `stream_` indicates a moved-from `LogStreamer` that should not
+  // `LOG` upon destruction.
+  absl::optional<absl::strings_internal::OStringStream> stream_;
+};
+
+// LogInfoStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kInfo.
+inline LogStreamer LogInfoStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kInfo, file, line);
+}
+
+// LogWarningStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kWarning.
+inline LogStreamer LogWarningStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kWarning, file, line);
+}
+
+// LogErrorStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kError.
+inline LogStreamer LogErrorStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kError, file, line);
+}
+
+// LogFatalStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kFatal.
+//
+// The program will be terminated when this `LogStreamer` is destroyed,
+// regardless of whether any data were streamed in.
+inline LogStreamer LogFatalStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kFatal, file, line);
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_LOG_STREAMER_H_
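A minimal usage sketch for the header above; `RenderStatusTo` and `EmitStatus` are hypothetical functions used only to show how `LogStreamer` adapts an API that takes a `std::ostream&`:

#include <ostream>

#include "absl/base/log_severity.h"
#include "absl/log/log_streamer.h"

// Hypothetical callee that only knows how to write to a std::ostream.
void RenderStatusTo(std::ostream& os) { os << "status: ok"; }

void EmitStatus() {
  // Everything streamed through `streamer.stream()` is logged as a single
  // INFO message, attributed to this file and line, when `streamer` goes
  // out of scope.
  absl::LogStreamer streamer(absl::LogSeverity::kInfo, __FILE__, __LINE__);
  RenderStatusTo(streamer.stream());
  streamer.stream() << " (rendered)";
}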
diff --git a/abseil-cpp/absl/log/log_streamer_test.cc b/abseil-cpp/absl/log/log_streamer_test.cc
new file mode 100644
index 0000000..328d70d
--- /dev/null
+++ b/abseil-cpp/absl/log/log_streamer_test.cc
@@ -0,0 +1,365 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/log_streamer.h"
+
+#include <ios>
+#include <iostream>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/test_actions.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/string_view.h"
+
+namespace {
+using ::absl::log_internal::DeathTestExpectedLogging;
+using ::absl::log_internal::DeathTestUnexpectedLogging;
+using ::absl::log_internal::DeathTestValidateExpectations;
+#if GTEST_HAS_DEATH_TEST
+using ::absl::log_internal::DiedOfFatal;
+#endif
+using ::absl::log_internal::LogSeverity;
+using ::absl::log_internal::Prefix;
+using ::absl::log_internal::SourceFilename;
+using ::absl::log_internal::SourceLine;
+using ::absl::log_internal::Stacktrace;
+using ::absl::log_internal::TextMessage;
+using ::absl::log_internal::ThreadID;
+using ::absl::log_internal::TimestampInMatchWindow;
+using ::testing::AnyNumber;
+using ::testing::Eq;
+using ::testing::HasSubstr;
+using ::testing::IsEmpty;
+using ::testing::IsTrue;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+void WriteToStream(absl::string_view data, std::ostream* os) {
+  *os << "WriteToStream: " << data;
+}
+void WriteToStreamRef(absl::string_view data, std::ostream& os) {
+  os << "WriteToStreamRef: " << data;
+}
+
+TEST(LogStreamerTest, LogInfoStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kInfo)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream("foo", &absl::LogInfoStreamer("path/file.cc", 1234).stream());
+}
+
+TEST(LogStreamerTest, LogWarningStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kWarning)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream("foo",
+                &absl::LogWarningStreamer("path/file.cc", 1234).stream());
+}
+
+TEST(LogStreamerTest, LogErrorStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream("foo", &absl::LogErrorStreamer("path/file.cc", 1234).stream());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(LogStreamerDeathTest, LogFatalStreamer) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+
+        EXPECT_CALL(test_sink, Send)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        EXPECT_CALL(
+            test_sink,
+            Send(AllOf(
+                SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                TimestampInMatchWindow(),
+                ThreadID(Eq(absl::base_internal::GetTID())),
+                TextMessage(Eq("WriteToStream: foo")),
+                ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                   str: "WriteToStream: foo"
+                                                 })pb")))))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        WriteToStream("foo",
+                      &absl::LogFatalStreamer("path/file.cc", 1234).stream());
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+#endif
+
+TEST(LogStreamerTest, LogStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream(
+      "foo", &absl::LogStreamer(absl::LogSeverity::kError, "path/file.cc", 1234)
+                  .stream());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(LogStreamerDeathTest, LogStreamer) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+
+        EXPECT_CALL(test_sink, Send)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        EXPECT_CALL(
+            test_sink,
+            Send(AllOf(
+                SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                TimestampInMatchWindow(),
+                ThreadID(Eq(absl::base_internal::GetTID())),
+                TextMessage(Eq("WriteToStream: foo")),
+                ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                   str: "WriteToStream: foo"
+                                                 })pb")))))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        WriteToStream("foo", &absl::LogStreamer(absl::LogSeverity::kFatal,
+                                                "path/file.cc", 1234)
+                                  .stream());
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+#endif
+
+TEST(LogStreamerTest, PassedByReference) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 TextMessage(Eq("WriteToStreamRef: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStreamRef: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStreamRef("foo", absl::LogInfoStreamer("path/file.cc", 1234).stream());
+}
+
+TEST(LogStreamerTest, StoredAsLocal) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  auto streamer = absl::LogInfoStreamer("path/file.cc", 1234);
+  WriteToStream("foo", &streamer.stream());
+  streamer.stream() << " ";
+  WriteToStreamRef("bar", streamer.stream());
+
+  // The call should happen when `streamer` goes out of scope; if it
+  // happened before this `EXPECT_CALL` the call would be unexpected and the
+  // test would fail.
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 TextMessage(Eq("WriteToStream: foo WriteToStreamRef: bar")),
+                 ENCODED_MESSAGE(EqualsProto(
+                     R"pb(value {
+                            str: "WriteToStream: foo WriteToStreamRef: bar"
+                          })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(LogStreamerDeathTest, StoredAsLocal) {
+  EXPECT_EXIT(
+      {
+        // This is fatal when it goes out of scope, but not until then:
+        auto streamer = absl::LogFatalStreamer("path/file.cc", 1234);
+        std::cerr << "I'm still alive" << std::endl;
+        WriteToStream("foo", &streamer.stream());
+      },
+      DiedOfFatal, HasSubstr("I'm still alive"));
+}
+#endif
+
+TEST(LogStreamerTest, LogsEmptyLine) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Send(AllOf(SourceFilename(Eq("path/file.cc")),
+                                    SourceLine(Eq(1234)), TextMessage(Eq("")),
+                                    ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                                       str: ""
+                                                                     })pb")),
+                                    Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  absl::LogInfoStreamer("path/file.cc", 1234);
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(LogStreamerDeathTest, LogsEmptyLine) {
+  EXPECT_EXIT(
+      {
+        absl::ScopedMockLog test_sink;
+
+        EXPECT_CALL(test_sink, Log)
+            .Times(AnyNumber())
+            .WillRepeatedly(DeathTestUnexpectedLogging());
+
+        EXPECT_CALL(
+            test_sink,
+            Send(AllOf(
+                SourceFilename(Eq("path/file.cc")), TextMessage(Eq("")),
+                ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "" })pb")))))
+            .WillOnce(DeathTestExpectedLogging());
+
+        test_sink.StartCapturingLogs();
+        // This is fatal even though it's never used:
+        auto streamer = absl::LogFatalStreamer("path/file.cc", 1234);
+      },
+      DiedOfFatal, DeathTestValidateExpectations());
+}
+#endif
+
+TEST(LogStreamerTest, MoveConstruction) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 LogSeverity(Eq(absl::LogSeverity::kInfo)),
+                 TextMessage(Eq("hello 0x10 world 0x10")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "hello 0x10 world 0x10"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234);
+  streamer1.stream() << "hello " << std::hex << 16;
+  absl::LogStreamer streamer2(std::move(streamer1));
+  streamer2.stream() << " world " << 16;
+}
+
+TEST(LogStreamerTest, MoveAssignment) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  testing::InSequence seq;
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file2.cc")), SourceLine(Eq(5678)),
+                 LogSeverity(Eq(absl::LogSeverity::kWarning)),
+                 TextMessage(Eq("something else")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "something else"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 LogSeverity(Eq(absl::LogSeverity::kInfo)),
+                 TextMessage(Eq("hello 0x10 world 0x10")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "hello 0x10 world 0x10"
+                                                  })pb")),
+                 Stacktrace(IsEmpty()))));
+
+  test_sink.StartCapturingLogs();
+  auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234);
+  streamer1.stream() << "hello " << std::hex << 16;
+  auto streamer2 = absl::LogWarningStreamer("path/file2.cc", 5678);
+  streamer2.stream() << "something else";
+  streamer2 = std::move(streamer1);
+  streamer2.stream() << " world " << 16;
+}
+
+TEST(LogStreamerTest, CorrectDefaultFlags) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  // The `boolalpha` and `showbase` flags should be set by default, to match
+  // `LOG`.
+  EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("false0xdeadbeef")))))
+      .Times(2);
+
+  test_sink.StartCapturingLogs();
+  absl::LogInfoStreamer("path/file.cc", 1234).stream()
+      << false << std::hex << 0xdeadbeef;
+  LOG(INFO) << false << std::hex << 0xdeadbeef;
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/scoped_mock_log.cc b/abseil-cpp/absl/log/scoped_mock_log.cc
new file mode 100644
index 0000000..39a0a52
--- /dev/null
+++ b/abseil-cpp/absl/log/scoped_mock_log.cc
@@ -0,0 +1,86 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/scoped_mock_log.h"
+
+#include <atomic>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+#include "absl/log/log_sink_registry.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+ScopedMockLog::ScopedMockLog(MockLogDefault default_exp)
+    : sink_(this), is_capturing_logs_(false), is_triggered_(false) {
+  if (default_exp == MockLogDefault::kIgnoreUnexpected) {
+    // Ignore all calls to Log we did not set expectations for.
+    EXPECT_CALL(*this, Log).Times(::testing::AnyNumber());
+  } else {
+    // Disallow all calls to Log we did not set expectations for.
+    EXPECT_CALL(*this, Log).Times(0);
+  }
+  // By default the Send mock forwards to the Log mock.
+  EXPECT_CALL(*this, Send)
+      .Times(::testing::AnyNumber())
+      .WillRepeatedly([this](const absl::LogEntry& entry) {
+        is_triggered_.store(true, std::memory_order_relaxed);
+        Log(entry.log_severity(), std::string(entry.source_filename()),
+            std::string(entry.text_message()));
+      });
+
+  // By default we ignore all Flush calls.
+  EXPECT_CALL(*this, Flush).Times(::testing::AnyNumber());
+}
+
+ScopedMockLog::~ScopedMockLog() {
+  ABSL_RAW_CHECK(is_triggered_.load(std::memory_order_relaxed),
+                 "Did you forget to call StartCapturingLogs()?");
+
+  if (is_capturing_logs_) StopCapturingLogs();
+}
+
+void ScopedMockLog::StartCapturingLogs() {
+  ABSL_RAW_CHECK(!is_capturing_logs_,
+                 "StartCapturingLogs() can be called only when the "
+                 "absl::ScopedMockLog object is not capturing logs.");
+
+  is_capturing_logs_ = true;
+  is_triggered_.store(true, std::memory_order_relaxed);
+  absl::AddLogSink(&sink_);
+}
+
+void ScopedMockLog::StopCapturingLogs() {
+  ABSL_RAW_CHECK(is_capturing_logs_,
+                 "StopCapturingLogs() can be called only when the "
+                 "absl::ScopedMockLog object is capturing logs.");
+
+  is_capturing_logs_ = false;
+  absl::RemoveLogSink(&sink_);
+}
+
+absl::LogSink& ScopedMockLog::UseAsLocalSink() {
+  is_triggered_.store(true, std::memory_order_relaxed);
+  return sink_;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/log/scoped_mock_log.h b/abseil-cpp/absl/log/scoped_mock_log.h
new file mode 100644
index 0000000..399e604
--- /dev/null
+++ b/abseil-cpp/absl/log/scoped_mock_log.h
@@ -0,0 +1,197 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/scoped_mock_log.h
+// -----------------------------------------------------------------------------
+//
+// This header declares `class absl::ScopedMockLog`, for use in testing.
+
+#ifndef ABSL_LOG_SCOPED_MOCK_LOG_H_
+#define ABSL_LOG_SCOPED_MOCK_LOG_H_
+
+#include <atomic>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/log_entry.h"
+#include "absl/log/log_sink.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// MockLogDefault
+//
+// Controls how ScopedMockLog responds to unexpected calls by default.
+enum class MockLogDefault { kIgnoreUnexpected, kDisallowUnexpected };
+
+// ScopedMockLog
+//
+// ScopedMockLog is a LogSink that intercepts LOG() messages issued during its
+// lifespan.
+//
+// Using this together with GoogleTest, it's easy to test how a piece of code
+// calls LOG(). The typical usage, noting the distinction between
+// "uninteresting" and "unexpected", looks like this:
+//
+//   using ::testing::_;
+//   using ::testing::AnyNumber;
+//   using ::testing::EndsWith;
+//   using ::testing::kDoNotCaptureLogsYet;
+//   using ::testing::Lt;
+//
+//   TEST(FooTest, LogsCorrectly) {
+//     // Simple robust setup, ignores unexpected logs.
+//     absl::ScopedMockLog log;
+//
+//     // We expect the WARNING "Something bad!" exactly twice.
+//     EXPECT_CALL(log, Log(absl::LogSeverity::kWarning, _, "Something bad!"))
+//         .Times(2);
+//
+//     // But we want no messages from foo.cc.
+//     EXPECT_CALL(log, Log(_, EndsWith("/foo.cc"), _)).Times(0);
+//
+//     log.StartCapturingLogs();  // Call this after done setting expectations.
+//     Foo();  // Exercises the code under test.
+//   }
+//
+//   TEST(BarTest, LogsExactlyCorrectly) {
+//     // Strict checking, fails for unexpected logs.
+//     absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
+//
+//     // ... but ignore low severity messages
+//     EXPECT_CALL(log, Log(Lt(absl::LogSeverity::kWarning), _, _))
+//         .Times(AnyNumber());
+//
+//     // We expect the ERROR "Something bad!" exactly once.
+//     EXPECT_CALL(log, Log(absl::LogSeverity::kError, EndsWith("/foo.cc"),
+//                 "Something bad!"))
+//         .Times(1);
+//
+//     log.StartCapturingLogs();  // Call this after done setting expectations.
+//     Bar();  // Exercises the code under test.
+//   }
+//
+// Note that in a multi-threaded environment, all LOG() messages from a single
+// thread will be handled in sequence, but that cannot be guaranteed for
+// messages from different threads. In fact, if the same or multiple
+// expectations are matched on two threads concurrently, their actions will be
+// executed concurrently as well and may interleave.
+class ScopedMockLog final {
+ public:
+  // ScopedMockLog::ScopedMockLog()
+  //
+  // Sets up the log and adds default expectations.
+  explicit ScopedMockLog(
+      MockLogDefault default_exp = MockLogDefault::kIgnoreUnexpected);
+  ScopedMockLog(const ScopedMockLog&) = delete;
+  ScopedMockLog& operator=(const ScopedMockLog&) = delete;
+
+  // ScopedMockLog::~ScopedMockLog()
+  //
+  // Stops intercepting logs and destroys this ScopedMockLog.
+  ~ScopedMockLog();
+
+  // ScopedMockLog::StartCapturingLogs()
+  //
+  // Starts log capturing if the object isn't already doing so. Otherwise
+  // crashes.
+  //
+  // Usually this method is called in the same thread that created this
+  // ScopedMockLog. It is the user's responsibility to not call this method if
+  // another thread may be calling it or StopCapturingLogs() at the same time.
+  // It is undefined behavior to add expectations while capturing logs is
+  // enabled.
+  void StartCapturingLogs();
+
+  // ScopedMockLog::StopCapturingLogs()
+  //
+  // Stops log capturing if the object is capturing logs. Otherwise crashes.
+  //
+  // Usually this method is called in the same thread that created this object.
+  // It is the user's responsibility to not call this method if another thread
+  // may be calling it or StartCapturingLogs() at the same time.
+  //
+  // It is undefined behavior to add expectations while capturing is enabled.
+  void StopCapturingLogs();
+
+  // ScopedMockLog::UseAsLocalSink()
+  //
+  // Each `ScopedMockLog` is implemented with an `absl::LogSink`; this method
+  // returns a reference to that sink (e.g. for use with
+  // `LOG(...).ToSinkOnly()`) and marks the `ScopedMockLog` as having been used
+  // even if `StartCapturingLogs` is never called.
+  absl::LogSink& UseAsLocalSink();
+
+  // Implements the mock method:
+  //
+  //   void Log(LogSeverity severity, absl::string_view file_path,
+  //            absl::string_view message);
+  //
+  // The second argument to Log() is the full path of the source file in
+  // which the LOG() was issued.
+  //
+  // This is a shorthand form, which should be used by most users. Use the
+  // `Send` mock only if you want to add expectations for other log message
+  // attributes.
+  MOCK_METHOD(void, Log,
+              (absl::LogSeverity severity, const std::string& file_path,
+               const std::string& message));
+
+  // Implements the mock method:
+  //
+  //   void Send(const absl::LogEntry& entry);
+  //
+  // This is the most generic form of mock that can be specified. Use this mock
+  // only if you want to add expectations for log message attributes different
+  // from the log message text, log message path and log message severity.
+  //
+  // If no expectations are specified for this mock, the default action is to
+  // forward the call to the `Log` mock.
+  MOCK_METHOD(void, Send, (const absl::LogEntry&));
+
+  // Implements the mock method:
+  //
+  //   void Flush();
+  //
+  // Use this mock only if you want to add expectations for log flush calls.
+  MOCK_METHOD(void, Flush, ());
+
+ private:
+  class ForwardingSink final : public absl::LogSink {
+   public:
+    explicit ForwardingSink(ScopedMockLog* sml) : sml_(sml) {}
+    ForwardingSink(const ForwardingSink&) = delete;
+    ForwardingSink& operator=(const ForwardingSink&) = delete;
+    void Send(const absl::LogEntry& entry) override { sml_->Send(entry); }
+    void Flush() override { sml_->Flush(); }
+
+   private:
+    ScopedMockLog* sml_;
+  };
+
+  ForwardingSink sink_;
+  bool is_capturing_logs_;
+  // Until C++20, the default constructor of std::atomic leaves the wrapped
+  // value uninitialized, so all constructors should be sure to initialize
+  // is_triggered_.
+  std::atomic<bool> is_triggered_;
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_SCOPED_MOCK_LOG_H_
diff --git a/abseil-cpp/absl/log/scoped_mock_log_test.cc b/abseil-cpp/absl/log/scoped_mock_log_test.cc
new file mode 100644
index 0000000..4273693
--- /dev/null
+++ b/abseil-cpp/absl/log/scoped_mock_log_test.cc
@@ -0,0 +1,295 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/scoped_mock_log.h"
+
+#include <memory>
+#include <thread>  // NOLINT(build/c++11)
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/globals.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/notification.h"
+
+namespace {
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::Eq;
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+using ::testing::Lt;
+using ::testing::Truly;
+using absl::log_internal::SourceBasename;
+using absl::log_internal::SourceFilename;
+using absl::log_internal::SourceLine;
+using absl::log_internal::TextMessageWithPrefix;
+using absl::log_internal::ThreadID;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+#if GTEST_HAS_DEATH_TEST
+TEST(ScopedMockLogDeathTest,
+     StartCapturingLogsCannotBeCalledWhenAlreadyCapturing) {
+  EXPECT_DEATH(
+      {
+        absl::ScopedMockLog log;
+        log.StartCapturingLogs();
+        log.StartCapturingLogs();
+      },
+      "StartCapturingLogs");
+}
+
+TEST(ScopedMockLogDeathTest, StopCapturingLogsCannotBeCalledWhenNotCapturing) {
+  EXPECT_DEATH(
+      {
+        absl::ScopedMockLog log;
+        log.StopCapturingLogs();
+      },
+      "StopCapturingLogs");
+}
+
+TEST(ScopedMockLogDeathTest, FailsCheckIfStartCapturingLogsIsNeverCalled) {
+  EXPECT_DEATH({ absl::ScopedMockLog log; },
+               "Did you forget to call StartCapturingLogs");
+}
+#endif
+
+// Tests that ScopedMockLog intercepts LOG()s when it's alive.
+TEST(ScopedMockLogTest, LogMockCatchAndMatchStrictExpectations) {
+  absl::ScopedMockLog log;
+
+  // The following expectations must match in the order they appear.
+  InSequence s;
+  EXPECT_CALL(log,
+              Log(absl::LogSeverity::kWarning, HasSubstr(__FILE__), "Danger."));
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Working...")).Times(2);
+  EXPECT_CALL(log, Log(absl::LogSeverity::kError, _, "Bad!!"));
+
+  log.StartCapturingLogs();
+  LOG(WARNING) << "Danger.";
+  LOG(INFO) << "Working...";
+  LOG(INFO) << "Working...";
+  LOG(ERROR) << "Bad!!";
+}
+
+TEST(ScopedMockLogTest, LogMockCatchAndMatchSendExpectations) {
+  absl::ScopedMockLog log;
+
+  EXPECT_CALL(
+      log,
+      Send(AllOf(SourceFilename(Eq("/my/very/very/very_long_source_file.cc")),
+                 SourceBasename(Eq("very_long_source_file.cc")),
+                 SourceLine(Eq(777)), ThreadID(Eq(absl::LogEntry::tid_t{1234})),
+                 TextMessageWithPrefix(Truly([](absl::string_view msg) {
+                   return absl::EndsWith(
+                       msg, " very_long_source_file.cc:777] Info message");
+                 })))));
+
+  log.StartCapturingLogs();
+  LOG(INFO)
+          .AtLocation("/my/very/very/very_long_source_file.cc", 777)
+          .WithThreadID(1234)
+      << "Info message";
+}
+
+TEST(ScopedMockLogTest, ScopedMockLogCanBeNice) {
+  absl::ScopedMockLog log;
+
+  InSequence s;
+  EXPECT_CALL(log,
+              Log(absl::LogSeverity::kWarning, HasSubstr(__FILE__), "Danger."));
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Working...")).Times(2);
+  EXPECT_CALL(log, Log(absl::LogSeverity::kError, _, "Bad!!"));
+
+  log.StartCapturingLogs();
+
+  // Any number of these are OK.
+  LOG(INFO) << "Info message.";
+  // Any number of these are OK.
+  LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
+
+  LOG(WARNING) << "Danger.";
+
+  // Any number of these are OK.
+  LOG(INFO) << "Info message.";
+  // Any number of these are OK.
+  LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
+
+  LOG(INFO) << "Working...";
+
+  // Any number of these are OK.
+  LOG(INFO) << "Info message.";
+  // Any number of these are OK.
+  LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
+
+  LOG(INFO) << "Working...";
+
+  // Any number of these are OK.
+  LOG(INFO) << "Info message.";
+  // Any number of these are OK.
+  LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
+
+  LOG(ERROR) << "Bad!!";
+
+  // Any number of these are OK.
+  LOG(INFO) << "Info message.";
+  // Any number of these are OK.
+  LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
+}
+
+// Tests that ScopedMockLog generates a test failure if a message is logged
+// that is not expected (here, that means ERROR or FATAL).
+TEST(ScopedMockLogTest, RejectsUnexpectedLogs) {
+  EXPECT_NONFATAL_FAILURE(
+      {
+        absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
+        // Any INFO and WARNING messages are permitted.
+        EXPECT_CALL(log, Log(Lt(absl::LogSeverity::kError), _, _))
+            .Times(AnyNumber());
+        log.StartCapturingLogs();
+        LOG(INFO) << "Ignored";
+        LOG(WARNING) << "Ignored";
+        LOG(ERROR) << "Should not be ignored";
+      },
+      "Should not be ignored");
+}
+
+TEST(ScopedMockLogTest, CapturesLogsAfterStartCapturingLogs) {
+  absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfinity);
+  absl::ScopedMockLog log;
+
+  // The ScopedMockLog object shouldn't see these LOGs, as it hasn't
+  // started capturing LOGs yet.
+  LOG(INFO) << "Ignored info";
+  LOG(WARNING) << "Ignored warning";
+  LOG(ERROR) << "Ignored error";
+
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Expected info"));
+  log.StartCapturingLogs();
+
+  // Only this LOG will be seen by the ScopedMockLog.
+  LOG(INFO) << "Expected info";
+}
+
+TEST(ScopedMockLogTest, DoesNotCaptureLogsAfterStopCapturingLogs) {
+  absl::ScopedMockLog log;
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Expected info"));
+
+  log.StartCapturingLogs();
+
+  // This LOG should be seen by the ScopedMockLog.
+  LOG(INFO) << "Expected info";
+
+  log.StopCapturingLogs();
+
+  // The ScopedMockLog object shouldn't see these LOGs, as it has
+  // stopped capturing LOGs.
+  LOG(INFO) << "Ignored info";
+  LOG(WARNING) << "Ignored warning";
+  LOG(ERROR) << "Ignored error";
+}
+
+// Tests that all messages are intercepted regardless of issuing thread. The
+// purpose of this test is NOT to exercise thread-safety.
+TEST(ScopedMockLogTest, LogFromMultipleThreads) {
+  absl::ScopedMockLog log;
+
+  // We don't establish an order to expectations here, since the threads may
+  // execute their log statements in different order.
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread 1"));
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread 2"));
+
+  log.StartCapturingLogs();
+
+  absl::Barrier barrier(2);
+  std::thread thread1([&barrier]() {
+    barrier.Block();
+    LOG(INFO) << "Thread 1";
+  });
+  std::thread thread2([&barrier]() {
+    barrier.Block();
+    LOG(INFO) << "Thread 2";
+  });
+
+  thread1.join();
+  thread2.join();
+}
+
+// Tests that no sequence will be imposed on two LOG message expectations from
+// different threads. This test would actually deadlock if the two LOG
+// statements were issued from the same thread.
+TEST(ScopedMockLogTest, NoSequenceWithMultipleThreads) {
+  absl::ScopedMockLog log;
+
+  absl::Barrier barrier(2);
+  EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, _))
+      .Times(2)
+      .WillRepeatedly([&barrier]() { barrier.Block(); });
+
+  log.StartCapturingLogs();
+
+  std::thread thread1([]() { LOG(INFO) << "Thread 1"; });
+  std::thread thread2([]() { LOG(INFO) << "Thread 2"; });
+
+  thread1.join();
+  thread2.join();
+}
+
+TEST(ScopedMockLogTsanTest,
+     ScopedMockLogCanBeDeletedWhenAnotherThreadIsLogging) {
+  auto log = absl::make_unique<absl::ScopedMockLog>();
+  EXPECT_CALL(*log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread log"))
+      .Times(AnyNumber());
+
+  log->StartCapturingLogs();
+
+  absl::Notification logging_started;
+
+  std::thread thread([&logging_started]() {
+    for (int i = 0; i < 100; ++i) {
+      if (i == 50) logging_started.Notify();
+      LOG(INFO) << "Thread log";
+    }
+  });
+
+  logging_started.WaitForNotification();
+  log.reset();
+  thread.join();
+}
+
+TEST(ScopedMockLogTest, AsLocalSink) {
+  absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(log, Log(_, _, "two"));
+  EXPECT_CALL(log, Log(_, _, "three"));
+
+  LOG(INFO) << "one";
+  LOG(INFO).ToSinkOnly(&log.UseAsLocalSink()) << "two";
+  LOG(INFO).ToSinkAlso(&log.UseAsLocalSink()) << "three";
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/stripping_test.cc b/abseil-cpp/absl/log/stripping_test.cc
new file mode 100644
index 0000000..aff9149
--- /dev/null
+++ b/abseil-cpp/absl/log/stripping_test.cc
@@ -0,0 +1,374 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Tests for stripping of literal strings.
+// ---------------------------------------
+//
+// When a `LOG` statement can be trivially proved at compile time to never fire,
+// e.g. due to `ABSL_MIN_LOG_LEVEL`, `NDEBUG`, or some explicit condition, data
+// streamed in can be dropped from the compiled program completely if they are
+// not used elsewhere.  This most commonly affects string literals, which users
+// often want to strip to reduce binary size and/or redact information about
+// their program's internals (e.g. in a release build).
+//
+// These tests log strings and then validate whether they appear in the compiled
+// binary.  This is done by opening the file corresponding to the running test
+// and running a simple string search on its contents.  The strings to be logged
+// and searched for must be unique, and we must take care not to emit them into
+// the binary in any other place, e.g. when searching for them.  The latter is
+// accomplished by computing them using base64; the source string appears in the
+// binary but the target string is computed at runtime.
+
+#include <stdio.h>
+
+#if defined(__MACH__)
+#include <mach-o/dyld.h>
+#elif defined(_WIN32)
+#include <Windows.h>
+#include <tchar.h>
+#endif
+
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/strerror.h"
+#include "absl/base/log_severity.h"
+#include "absl/flags/internal/program_name.h"
+#include "absl/log/check.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/log.h"
+#include "absl/strings/escaping.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+
+// Set a flag that controls whether we actually execute fatal statements, but
+// prevent the compiler from optimizing it out.
+static volatile bool kReallyDie = false;
+
+namespace {
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::NotNull;
+
+using absl::log_internal::kAbslMinLogLevel;
+
+std::string Base64UnescapeOrDie(absl::string_view data) {
+  std::string decoded;
+  CHECK(absl::Base64Unescape(data, &decoded));
+  return decoded;
+}
+
+// -----------------------------------------------------------------------------
+// A Googletest matcher which searches the running binary for a given string
+// -----------------------------------------------------------------------------
+
+// This matcher is used to validate that literal strings streamed into
+// `LOG` statements that ought to be compiled out (e.g. `LOG_IF(INFO, false)`)
+// do not appear in the binary.
+//
+// Note that passing the string to be sought directly to `FileHasSubstr()` all
+// but forces its inclusion in the binary regardless of the logging library's
+// behavior. For example:
+//
+//   LOG_IF(INFO, false) << "you're the man now dog";
+//   // This will always pass:
+//   // EXPECT_THAT(fp, FileHasSubstr("you're the man now dog"));
+//   // So use this instead:
+//   EXPECT_THAT(fp, FileHasSubstr(
+//       Base64UnescapeOrDie("eW91J3JlIHRoZSBtYW4gbm93IGRvZw==")));
+
+class FileHasSubstrMatcher final : public ::testing::MatcherInterface<FILE*> {
+ public:
+  explicit FileHasSubstrMatcher(absl::string_view needle) : needle_(needle) {}
+
+  bool MatchAndExplain(
+      FILE* fp, ::testing::MatchResultListener* listener) const override {
+    std::string buf(
+        std::max<std::string::size_type>(needle_.size() * 2, 163840000), '\0');
+    size_t buf_start_offset = 0;  // The file offset of the byte at `buf[0]`.
+    size_t buf_data_size = 0;     // The number of bytes of `buf` which contain
+                                  // data.
+
+    ::fseek(fp, 0, SEEK_SET);
+    while (true) {
+      // Fill the buffer to capacity or EOF:
+      while (buf_data_size < buf.size()) {
+        const size_t ret = fread(&buf[buf_data_size], sizeof(char),
+                                 buf.size() - buf_data_size, fp);
+        if (ret == 0) break;
+        buf_data_size += ret;
+      }
+      if (ferror(fp)) {
+        *listener << "error reading file";
+        return false;
+      }
+      const absl::string_view haystack(&buf[0], buf_data_size);
+      const auto off = haystack.find(needle_);
+      if (off != haystack.npos) {
+        *listener << "string found at offset " << buf_start_offset + off;
+        return true;
+      }
+      if (feof(fp)) {
+        *listener << "string not found";
+        return false;
+      }
+      // Copy the end of `buf` to the beginning so we catch matches that span
+      // buffer boundaries.  `buf` and `buf_data_size` are always large enough
+      // that these ranges don't overlap.
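+      // (For example, with a 6-byte needle the final 6 bytes of this chunk
+      // become the first 6 bytes of the next chunk, so a match that straddles
+      // the boundary is still found.)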
+      memcpy(&buf[0], &buf[buf_data_size - needle_.size()], needle_.size());
+      buf_start_offset += buf_data_size - needle_.size();
+      buf_data_size = needle_.size();
+    }
+  }
+  void DescribeTo(std::ostream* os) const override {
+    *os << "contains the string \"" << needle_ << "\" (base64(\""
+        << Base64UnescapeOrDie(needle_) << "\"))";
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "does not ";
+    DescribeTo(os);
+  }
+
+ private:
+  std::string needle_;
+};
+
+class StrippingTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+#ifndef NDEBUG
+    // Non-optimized builds don't necessarily eliminate dead code at all, so we
+    // don't attempt to validate stripping against such builds.
+    GTEST_SKIP() << "StrippingTests skipped since this build is not optimized";
+#elif defined(__EMSCRIPTEN__)
+    // These tests require a way to examine the running binary and look for
+    // strings; there's no portable way to do that.
+    GTEST_SKIP()
+        << "StrippingTests skipped since this platform is not optimized";
+#endif
+  }
+
+  // Opens this program's executable file.  Returns `nullptr` and writes to
+  // `stderr` on failure.
+  std::unique_ptr<FILE, std::function<void(FILE*)>> OpenTestExecutable() {
+#if defined(__linux__)
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        fopen("/proc/self/exe", "rb"), [](FILE* fp) { fclose(fp); });
+    if (!fp) {
+      const std::string err = absl::base_internal::StrError(errno);
+      absl::FPrintF(stderr, "Failed to open /proc/self/exe: %s\n", err);
+    }
+    return fp;
+#elif defined(__Fuchsia__)
+    // TODO(b/242579714): We need to restore the test coverage on this platform.
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        fopen(absl::StrCat("/pkg/bin/",
+                           absl::flags_internal::ShortProgramInvocationName())
+                  .c_str(),
+              "rb"),
+        [](FILE* fp) { fclose(fp); });
+    if (!fp) {
+      const std::string err = absl::base_internal::StrError(errno);
+      absl::FPrintF(stderr, "Failed to open /pkg/bin/<binary name>: %s\n", err);
+    }
+    return fp;
+#elif defined(__MACH__)
+    uint32_t size = 0;
+    int ret = _NSGetExecutablePath(nullptr, &size);
+    if (ret != -1) {
+      absl::FPrintF(stderr,
+                    "Failed to get executable path: "
+                    "_NSGetExecutablePath(nullptr) returned %d\n",
+                    ret);
+      return nullptr;
+    }
+    std::string path(size, '\0');
+    ret = _NSGetExecutablePath(&path[0], &size);
+    if (ret != 0) {
+      absl::FPrintF(
+          stderr,
+          "Failed to get executable path: _NSGetExecutablePath(buffer) "
+          "returned %d\n",
+          ret);
+      return nullptr;
+    }
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        fopen(path.c_str(), "rb"), [](FILE* fp) { fclose(fp); });
+    if (!fp) {
+      const std::string err = absl::base_internal::StrError(errno);
+      absl::FPrintF(stderr, "Failed to open executable at %s: %s\n", path, err);
+    }
+    return fp;
+#elif defined(_WIN32)
+    std::basic_string<TCHAR> path(4096, _T('\0'));
+    while (true) {
+      const uint32_t ret = ::GetModuleFileName(nullptr, &path[0],
+                                               static_cast<DWORD>(path.size()));
+      if (ret == 0) {
+        absl::FPrintF(
+            stderr,
+            "Failed to get executable path: GetModuleFileName(buffer) "
+            "returned 0\n");
+        return nullptr;
+      }
+      if (ret < path.size()) break;
+      path.resize(path.size() * 2, _T('\0'));
+    }
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        _tfopen(path.c_str(), _T("rb")), [](FILE* fp) { fclose(fp); });
+    if (!fp) absl::FPrintF(stderr, "Failed to open executable\n");
+    return fp;
+#else
+    absl::FPrintF(stderr,
+                  "OpenTestExecutable() unimplemented on this platform\n");
+    return nullptr;
+#endif
+  }
+
+  ::testing::Matcher<FILE*> FileHasSubstr(absl::string_view needle) {
+    return MakeMatcher(new FileHasSubstrMatcher(needle));
+  }
+};
+
+// This tests whether our methodology for testing stripping works on this
+// platform by looking for one string that definitely ought to be there and one
+// that definitely ought not to.  If this fails, none of the `StrippingTest`s
+// are going to produce meaningful results.
+TEST_F(StrippingTest, Control) {
+  constexpr char kEncodedPositiveControl[] =
+      "U3RyaXBwaW5nVGVzdC5Qb3NpdGl2ZUNvbnRyb2w=";
+  const std::string encoded_negative_control =
+      absl::Base64Escape("StrippingTest.NegativeControl");
+
+  // Verify this mainly so we can encode other strings and know definitely they
+  // won't encode to `kEncodedPositiveControl`.
+  EXPECT_THAT(Base64UnescapeOrDie("U3RyaXBwaW5nVGVzdC5Qb3NpdGl2ZUNvbnRyb2w="),
+              Eq("StrippingTest.PositiveControl"));
+
+  auto exe = OpenTestExecutable();
+  ASSERT_THAT(exe, NotNull());
+  EXPECT_THAT(exe.get(), FileHasSubstr(kEncodedPositiveControl));
+  EXPECT_THAT(exe.get(), Not(FileHasSubstr(encoded_negative_control)));
+}
+
+TEST_F(StrippingTest, Literal) {
+  // We need to load a copy of the needle string into memory (so we can search
+  // for it) without leaving it lying around in plaintext in the executable file
+  // as would happen if we used a literal.  We might (or might not) leave it
+  // lying around later; that's what the tests are for!
+  const std::string needle = absl::Base64Escape("StrippingTest.Literal");
+  LOG(INFO) << "U3RyaXBwaW5nVGVzdC5MaXRlcmFs";
+  auto exe = OpenTestExecutable();
+  ASSERT_THAT(exe, NotNull());
+  if (absl::LogSeverity::kInfo >= kAbslMinLogLevel) {
+    EXPECT_THAT(exe.get(), FileHasSubstr(needle));
+  } else {
+    EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle)));
+  }
+}
+
+TEST_F(StrippingTest, LiteralInExpression) {
+  // We need to load a copy of the needle string into memory (so we can search
+  // for it) without leaving it lying around in plaintext in the executable file
+  // as would happen if we used a literal.  We might (or might not) leave it
+  // lying around later; that's what the tests are for!
+  const std::string needle =
+      absl::Base64Escape("StrippingTest.LiteralInExpression");
+  LOG(INFO) << absl::StrCat("secret: ",
+                            "U3RyaXBwaW5nVGVzdC5MaXRlcmFsSW5FeHByZXNzaW9u");
+  std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable();
+  ASSERT_THAT(exe, NotNull());
+  if (absl::LogSeverity::kInfo >= kAbslMinLogLevel) {
+    EXPECT_THAT(exe.get(), FileHasSubstr(needle));
+  } else {
+    EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle)));
+  }
+}
+
+TEST_F(StrippingTest, Fatal) {
+  // We need to load a copy of the needle string into memory (so we can search
+  // for it) without leaving it lying around in plaintext in the executable file
+  // as would happen if we used a literal.  We might (or might not) leave it
+  // lying around later; that's what the tests are for!
+  const std::string needle = absl::Base64Escape("StrippingTest.Fatal");
+  // We don't care whether the LOG statement is actually executed; we're just
+  // checking that it's stripped.
+  if (kReallyDie) LOG(FATAL) << "U3RyaXBwaW5nVGVzdC5GYXRhbA==";
+
+  std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable();
+  ASSERT_THAT(exe, NotNull());
+  if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) {
+    EXPECT_THAT(exe.get(), FileHasSubstr(needle));
+  } else {
+    EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle)));
+  }
+}
+
+TEST_F(StrippingTest, Level) {
+  const std::string needle = absl::Base64Escape("StrippingTest.Level");
+  volatile auto severity = absl::LogSeverity::kWarning;
+  // Ensure that `severity` is not a compile-time constant to prove that
+  // stripping works regardless:
+  LOG(LEVEL(severity)) << "U3RyaXBwaW5nVGVzdC5MZXZlbA==";
+  std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable();
+  ASSERT_THAT(exe, NotNull());
+  if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) {
+    // This can't be stripped at compile-time because it might evaluate to a
+    // level that shouldn't be stripped.
+    EXPECT_THAT(exe.get(), FileHasSubstr(needle));
+  } else {
+#if (defined(_MSC_VER) && !defined(__clang__)) || defined(__APPLE__)
+    // Dead code elimination misses this case.
+#else
+    // All levels should be stripped, so it doesn't matter what the severity
+    // winds up being.
+    EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle)));
+#endif
+  }
+}
+
+TEST_F(StrippingTest, Check) {
+  // Here we also need a variable name with enough entropy that it's unlikely to
+  // appear in the binary by chance.  `volatile` keeps the tautological
+  // comparison (and the rest of the `CHECK`) from being optimized away.
+  const std::string var_needle = absl::Base64Escape("StrippingTestCheckVar");
+  const std::string msg_needle = absl::Base64Escape("StrippingTest.Check");
+  volatile int U3RyaXBwaW5nVGVzdENoZWNrVmFy = 0xCAFE;
+  // We don't care if the CHECK is actually executed, just that stripping works.
+  // Hiding it behind `kReallyDie` works around some overly aggressive
+  // optimizations in older versions of MSVC.
+  if (kReallyDie) {
+    CHECK(U3RyaXBwaW5nVGVzdENoZWNrVmFy != U3RyaXBwaW5nVGVzdENoZWNrVmFy)
+        << "U3RyaXBwaW5nVGVzdC5DaGVjaw==";
+  }
+
+  std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable();
+  ASSERT_THAT(exe, NotNull());
+  if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) {
+    EXPECT_THAT(exe.get(), FileHasSubstr(var_needle));
+    EXPECT_THAT(exe.get(), FileHasSubstr(msg_needle));
+  } else {
+    EXPECT_THAT(exe.get(), Not(FileHasSubstr(var_needle)));
+    EXPECT_THAT(exe.get(), Not(FileHasSubstr(msg_needle)));
+  }
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/log/structured.h b/abseil-cpp/absl/log/structured.h
new file mode 100644
index 0000000..9ad69fb
--- /dev/null
+++ b/abseil-cpp/absl/log/structured.h
@@ -0,0 +1,70 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/structured.h
+// -----------------------------------------------------------------------------
+//
+// This header declares APIs supporting structured logging, allowing log
+// statements to be more easily parsed, especially by automated processes.
+//
+// When structured logging is in use, data streamed into a `LOG` statement are
+// encoded as `Value` fields in a `logging.proto.Event` protocol buffer message.
+// The individual data are exposed programmatically to `LogSink`s and to the
+// user via some log-reading tools that can query the structured data more
+// usefully than would be possible if each message were a single opaque
+// string.  These helpers allow user code to add additional structure to the
+// data they stream.
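+//
+// As an illustrative sketch (with `elapsed_ms` standing in for any value a
+// caller might stream), a statement like
+//
+//   LOG(INFO) << "rpc took " << elapsed_ms << "ms";
+//
+// is captured as a sequence of `Value` fields rather than as one preformatted
+// line, so `LogSink`s and log-reading tools can inspect the pieces separately.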
+
+#ifndef ABSL_LOG_STRUCTURED_H_
+#define ABSL_LOG_STRUCTURED_H_
+
+#include <ostream>
+
+#include "absl/base/config.h"
+#include "absl/log/internal/structured.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// LogAsLiteral()
+//
+// Annotates its argument as a string literal so that structured logging
+// captures it as a `literal` field instead of a `str` field (the default).
+// This does not affect the text representation, only the structure.
+//
+// Streaming `LogAsLiteral(s)` into a `std::ostream` behaves just like streaming
+// `s` directly.
+//
+// Using `LogAsLiteral()` is occasionally appropriate and useful when proxying
+// data logged from another system or another language.  For example:
+//
+//   void Logger::LogString(absl::string_view str, absl::LogSeverity severity,
+//                          const char *file, int line) {
+//     LOG(LEVEL(severity)).AtLocation(file, line) << str;
+//   }
+//   void Logger::LogStringLiteral(absl::string_view str,
+//                                 absl::LogSeverity severity, const char *file,
+//                                 int line) {
+//     LOG(LEVEL(severity)).AtLocation(file, line) << absl::LogAsLiteral(str);
+//   }
+inline log_internal::AsLiteralImpl LogAsLiteral(absl::string_view s) {
+  return log_internal::AsLiteralImpl(s);
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_STRUCTURED_H_
diff --git a/abseil-cpp/absl/log/structured_test.cc b/abseil-cpp/absl/log/structured_test.cc
new file mode 100644
index 0000000..490a35d
--- /dev/null
+++ b/abseil-cpp/absl/log/structured_test.cc
@@ -0,0 +1,63 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/log/structured.h"
+
+#include <ios>
+#include <sstream>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+
+namespace {
+using ::absl::log_internal::MatchesOstream;
+using ::absl::log_internal::TextMessage;
+using ::testing::Eq;
+
+auto *test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+// The Abseil Logging library uses these by default, so we set them on the
+// `std::ostream` we compare against too.
+std::ios &LoggingDefaults(std::ios &str) {
+  str.setf(std::ios_base::showbase | std::ios_base::boolalpha |
+           std::ios_base::internal);
+  return str;
+}
+
+TEST(StreamingFormatTest, LogAsLiteral) {
+  std::ostringstream stream;
+  const std::string not_a_literal("hello world");
+  stream << LoggingDefaults << absl::LogAsLiteral(not_a_literal);
+
+  absl::ScopedMockLog sink;
+
+  EXPECT_CALL(sink,
+              Send(AllOf(TextMessage(MatchesOstream(stream)),
+                         TextMessage(Eq("hello world")),
+                         ENCODED_MESSAGE(EqualsProto(
+                             R"pb(value { literal: "hello world" })pb")))));
+
+  sink.StartCapturingLogs();
+  LOG(INFO) << absl::LogAsLiteral(not_a_literal);
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/memory/BUILD.bazel b/abseil-cpp/absl/memory/BUILD.bazel
index d2824a0..a93f54a 100644
--- a/abseil-cpp/absl/memory/BUILD.bazel
+++ b/abseil-cpp/absl/memory/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -30,6 +29,9 @@
     name = "memory",
     hdrs = ["memory.h"],
     copts = ABSL_DEFAULT_COPTS,
+    defines = select({
+        "//conditions:default": [],
+    }),
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:core_headers",
@@ -48,18 +50,3 @@
         "@com_google_googletest//:gtest_main",
     ],
 )
-
-cc_test(
-    name = "memory_exception_safety_test",
-    srcs = [
-        "memory_exception_safety_test.cc",
-    ],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":memory",
-        "//absl/base:config",
-        "//absl/base:exception_safety_testing",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
diff --git a/abseil-cpp/absl/memory/CMakeLists.txt b/abseil-cpp/absl/memory/CMakeLists.txt
index 78fb7e1..c5ed4b4 100644
--- a/abseil-cpp/absl/memory/CMakeLists.txt
+++ b/abseil-cpp/absl/memory/CMakeLists.txt
@@ -37,19 +37,5 @@
   DEPS
     absl::memory
     absl::core_headers
-    gmock_main
-)
-
-absl_cc_test(
-  NAME
-    memory_exception_safety_test
-  SRCS
-    "memory_exception_safety_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::memory
-    absl::config
-    absl::exception_safety_testing
-    gmock_main
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/memory/memory.h b/abseil-cpp/absl/memory/memory.h
index 2b5ff62..3508135 100644
--- a/abseil-cpp/absl/memory/memory.h
+++ b/abseil-cpp/absl/memory/memory.h
@@ -75,32 +75,6 @@
   return std::unique_ptr<T>(ptr);
 }
 
-namespace memory_internal {
-
-// Traits to select proper overload and return type for `absl::make_unique<>`.
-template <typename T>
-struct MakeUniqueResult {
-  using scalar = std::unique_ptr<T>;
-};
-template <typename T>
-struct MakeUniqueResult<T[]> {
-  using array = std::unique_ptr<T[]>;
-};
-template <typename T, size_t N>
-struct MakeUniqueResult<T[N]> {
-  using invalid = void;
-};
-
-}  // namespace memory_internal
-
-// gcc 4.8 has __cplusplus at 201301 but the libstdc++ shipped with it doesn't
-// define make_unique.  Other supported compilers either just define __cplusplus
-// as 201103 but have make_unique (msvc), or have make_unique whenever
-// __cplusplus > 201103 (clang).
-#if (__cplusplus > 201103L || defined(_MSC_VER)) && \
-    !(defined(__GLIBCXX__) && !defined(__cpp_lib_make_unique))
-using std::make_unique;
-#else
 // -----------------------------------------------------------------------------
 // Function Template: make_unique<T>()
 // -----------------------------------------------------------------------------
@@ -109,82 +83,18 @@
 // during the construction process. `absl::make_unique<>` also avoids redundant
 // type declarations, by avoiding the need to explicitly use the `new` operator.
 //
-// This implementation of `absl::make_unique<>` is designed for C++11 code and
-// will be replaced in C++14 by the equivalent `std::make_unique<>` abstraction.
-// `absl::make_unique<>` is designed to be 100% compatible with
-// `std::make_unique<>` so that the eventual migration will involve a simple
-// rename operation.
+// https://en.cppreference.com/w/cpp/memory/unique_ptr/make_unique
 //
 // For more background on why `std::unique_ptr<T>(new T(a,b))` is problematic,
 // see Herb Sutter's explanation on
 // (Exception-Safe Function Calls)[https://herbsutter.com/gotw/_102/].
 // (In general, reviewers should treat `new T(a,b)` with scrutiny.)
 //
-// Example usage:
-//
-//    auto p = make_unique<X>(args...);  // 'p'  is a std::unique_ptr<X>
-//    auto pa = make_unique<X[]>(5);     // 'pa' is a std::unique_ptr<X[]>
-//
-// Three overloads of `absl::make_unique` are required:
-//
-//   - For non-array T:
-//
-//       Allocates a T with `new T(std::forward<Args> args...)`,
-//       forwarding all `args` to T's constructor.
-//       Returns a `std::unique_ptr<T>` owning that object.
-//
-//   - For an array of unknown bounds T[]:
-//
-//       `absl::make_unique<>` will allocate an array T of type U[] with
-//       `new U[n]()` and return a `std::unique_ptr<U[]>` owning that array.
-//
-//       Note that 'U[n]()' is different from 'U[n]', and elements will be
-//       value-initialized. Note as well that `std::unique_ptr` will perform its
-//       own destruction of the array elements upon leaving scope, even though
-//       the array [] does not have a default destructor.
-//
-//       NOTE: an array of unknown bounds T[] may still be (and often will be)
-//       initialized to have a size, and will still use this overload. E.g:
-//
-//         auto my_array = absl::make_unique<int[]>(10);
-//
-//   - For an array of known bounds T[N]:
-//
-//       `absl::make_unique<>` is deleted (like with `std::make_unique<>`) as
-//       this overload is not useful.
-//
-//       NOTE: an array of known bounds T[N] is not considered a useful
-//       construction, and may cause undefined behavior in templates. E.g:
-//
-//         auto my_array = absl::make_unique<int[10]>();
-//
-//       In those cases, of course, you can still use the overload above and
-//       simply initialize it to its desired size:
-//
-//         auto my_array = absl::make_unique<int[]>(10);
-
-// `absl::make_unique` overload for non-array types.
-template <typename T, typename... Args>
-typename memory_internal::MakeUniqueResult<T>::scalar make_unique(
-    Args&&... args) {
-  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-// `absl::make_unique` overload for an array T[] of unknown bounds.
-// The array allocation needs to use the `new T[size]` form and cannot take
-// element constructor arguments. The `std::unique_ptr` will manage destructing
-// these array elements.
-template <typename T>
-typename memory_internal::MakeUniqueResult<T>::array make_unique(size_t n) {
-  return std::unique_ptr<T>(new typename absl::remove_extent_t<T>[n]());
-}
-
-// `absl::make_unique` overload for an array T[N] of known bounds.
-// This construction will be rejected.
-template <typename T, typename... Args>
-typename memory_internal::MakeUniqueResult<T>::invalid make_unique(
-    Args&&... /* args */) = delete;
-#endif
+// Historical note: Abseil once provided a C++11-compatible implementation of
+// C++14's `std::make_unique`. Now that C++11 support has been sunsetted,
+// `absl::make_unique` simply uses the STL-provided implementation. New code
+// should use `std::make_unique`.
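+//
+// A minimal usage sketch (equivalent to calling `std::make_unique` directly):
+//
+//   auto p = absl::make_unique<std::string>("hi");  // unique_ptr<std::string>
+//   auto a = absl::make_unique<int[]>(10);  // unique_ptr<int[]>, value-initialized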
+using std::make_unique;
 
 // -----------------------------------------------------------------------------
 // Function Template: RawPtr()
@@ -248,6 +158,26 @@
   return std::weak_ptr<T>(ptr);
 }
 
+// -----------------------------------------------------------------------------
+// Class Template: pointer_traits
+// -----------------------------------------------------------------------------
+//
+// Historical note: Abseil once provided an implementation of
+// `std::pointer_traits` for platforms that had not yet provided it. Those
+// platforms are no longer supported. New code should simply use
+// `std::pointer_traits`.
+using std::pointer_traits;
+
+// -----------------------------------------------------------------------------
+// Class Template: allocator_traits
+// -----------------------------------------------------------------------------
+//
+// Historical note: Abseil once provided an implementation of
+// `std::allocator_traits` for platforms that had not yet provided it. Those
+// platforms are no longer supported. New code should simply use
+// `std::allocator_traits`.
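+//
+// For example, `absl::allocator_traits<std::allocator<int>>` and
+// `std::allocator_traits<std::allocator<int>>` now name the same type.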
+using std::allocator_traits;
+
 namespace memory_internal {
 
 // ExtractOr<E, O, D>::type evaluates to E<O> if possible. Otherwise, D.
@@ -265,357 +195,6 @@
 template <template <typename> class Extract, typename Obj, typename Default>
 using ExtractOrT = typename ExtractOr<Extract, Obj, Default, void>::type;
 
-// Extractors for the features of allocators.
-template <typename T>
-using GetPointer = typename T::pointer;
-
-template <typename T>
-using GetConstPointer = typename T::const_pointer;
-
-template <typename T>
-using GetVoidPointer = typename T::void_pointer;
-
-template <typename T>
-using GetConstVoidPointer = typename T::const_void_pointer;
-
-template <typename T>
-using GetDifferenceType = typename T::difference_type;
-
-template <typename T>
-using GetSizeType = typename T::size_type;
-
-template <typename T>
-using GetPropagateOnContainerCopyAssignment =
-    typename T::propagate_on_container_copy_assignment;
-
-template <typename T>
-using GetPropagateOnContainerMoveAssignment =
-    typename T::propagate_on_container_move_assignment;
-
-template <typename T>
-using GetPropagateOnContainerSwap = typename T::propagate_on_container_swap;
-
-template <typename T>
-using GetIsAlwaysEqual = typename T::is_always_equal;
-
-template <typename T>
-struct GetFirstArg;
-
-template <template <typename...> class Class, typename T, typename... Args>
-struct GetFirstArg<Class<T, Args...>> {
-  using type = T;
-};
-
-template <typename Ptr, typename = void>
-struct ElementType {
-  using type = typename GetFirstArg<Ptr>::type;
-};
-
-template <typename T>
-struct ElementType<T, void_t<typename T::element_type>> {
-  using type = typename T::element_type;
-};
-
-template <typename T, typename U>
-struct RebindFirstArg;
-
-template <template <typename...> class Class, typename T, typename... Args,
-          typename U>
-struct RebindFirstArg<Class<T, Args...>, U> {
-  using type = Class<U, Args...>;
-};
-
-template <typename T, typename U, typename = void>
-struct RebindPtr {
-  using type = typename RebindFirstArg<T, U>::type;
-};
-
-template <typename T, typename U>
-struct RebindPtr<T, U, void_t<typename T::template rebind<U>>> {
-  using type = typename T::template rebind<U>;
-};
-
-template <typename T, typename U>
-constexpr bool HasRebindAlloc(...) {
-  return false;
-}
-
-template <typename T, typename U>
-constexpr bool HasRebindAlloc(typename T::template rebind<U>::other*) {
-  return true;
-}
-
-template <typename T, typename U, bool = HasRebindAlloc<T, U>(nullptr)>
-struct RebindAlloc {
-  using type = typename RebindFirstArg<T, U>::type;
-};
-
-template <typename T, typename U>
-struct RebindAlloc<T, U, true> {
-  using type = typename T::template rebind<U>::other;
-};
-
-}  // namespace memory_internal
-
-// -----------------------------------------------------------------------------
-// Class Template: pointer_traits
-// -----------------------------------------------------------------------------
-//
-// An implementation of C++11's std::pointer_traits.
-//
-// Provided for portability on toolchains that have a working C++11 compiler,
-// but the standard library is lacking in C++11 support. For example, some
-// version of the Android NDK.
-//
-
-template <typename Ptr>
-struct pointer_traits {
-  using pointer = Ptr;
-
-  // element_type:
-  // Ptr::element_type if present. Otherwise T if Ptr is a template
-  // instantiation Template<T, Args...>
-  using element_type = typename memory_internal::ElementType<Ptr>::type;
-
-  // difference_type:
-  // Ptr::difference_type if present, otherwise std::ptrdiff_t
-  using difference_type =
-      memory_internal::ExtractOrT<memory_internal::GetDifferenceType, Ptr,
-                                  std::ptrdiff_t>;
-
-  // rebind:
-  // Ptr::rebind<U> if exists, otherwise Template<U, Args...> if Ptr is a
-  // template instantiation Template<T, Args...>
-  template <typename U>
-  using rebind = typename memory_internal::RebindPtr<Ptr, U>::type;
-
-  // pointer_to:
-  // Calls Ptr::pointer_to(r)
-  static pointer pointer_to(element_type& r) {  // NOLINT(runtime/references)
-    return Ptr::pointer_to(r);
-  }
-};
-
-// Specialization for T*.
-template <typename T>
-struct pointer_traits<T*> {
-  using pointer = T*;
-  using element_type = T;
-  using difference_type = std::ptrdiff_t;
-
-  template <typename U>
-  using rebind = U*;
-
-  // pointer_to:
-  // Calls std::addressof(r)
-  static pointer pointer_to(
-      element_type& r) noexcept {  // NOLINT(runtime/references)
-    return std::addressof(r);
-  }
-};
-
-// -----------------------------------------------------------------------------
-// Class Template: allocator_traits
-// -----------------------------------------------------------------------------
-//
-// A C++11 compatible implementation of C++17's std::allocator_traits.
-//
-#if __cplusplus >= 201703L
-using std::allocator_traits;
-#else  // __cplusplus >= 201703L
-template <typename Alloc>
-struct allocator_traits {
-  using allocator_type = Alloc;
-
-  // value_type:
-  // Alloc::value_type
-  using value_type = typename Alloc::value_type;
-
-  // pointer:
-  // Alloc::pointer if present, otherwise value_type*
-  using pointer = memory_internal::ExtractOrT<memory_internal::GetPointer,
-                                              Alloc, value_type*>;
-
-  // const_pointer:
-  // Alloc::const_pointer if present, otherwise
-  // absl::pointer_traits<pointer>::rebind<const value_type>
-  using const_pointer =
-      memory_internal::ExtractOrT<memory_internal::GetConstPointer, Alloc,
-                                  typename absl::pointer_traits<pointer>::
-                                      template rebind<const value_type>>;
-
-  // void_pointer:
-  // Alloc::void_pointer if present, otherwise
-  // absl::pointer_traits<pointer>::rebind<void>
-  using void_pointer = memory_internal::ExtractOrT<
-      memory_internal::GetVoidPointer, Alloc,
-      typename absl::pointer_traits<pointer>::template rebind<void>>;
-
-  // const_void_pointer:
-  // Alloc::const_void_pointer if present, otherwise
-  // absl::pointer_traits<pointer>::rebind<const void>
-  using const_void_pointer = memory_internal::ExtractOrT<
-      memory_internal::GetConstVoidPointer, Alloc,
-      typename absl::pointer_traits<pointer>::template rebind<const void>>;
-
-  // difference_type:
-  // Alloc::difference_type if present, otherwise
-  // absl::pointer_traits<pointer>::difference_type
-  using difference_type = memory_internal::ExtractOrT<
-      memory_internal::GetDifferenceType, Alloc,
-      typename absl::pointer_traits<pointer>::difference_type>;
-
-  // size_type:
-  // Alloc::size_type if present, otherwise
-  // std::make_unsigned<difference_type>::type
-  using size_type = memory_internal::ExtractOrT<
-      memory_internal::GetSizeType, Alloc,
-      typename std::make_unsigned<difference_type>::type>;
-
-  // propagate_on_container_copy_assignment:
-  // Alloc::propagate_on_container_copy_assignment if present, otherwise
-  // std::false_type
-  using propagate_on_container_copy_assignment = memory_internal::ExtractOrT<
-      memory_internal::GetPropagateOnContainerCopyAssignment, Alloc,
-      std::false_type>;
-
-  // propagate_on_container_move_assignment:
-  // Alloc::propagate_on_container_move_assignment if present, otherwise
-  // std::false_type
-  using propagate_on_container_move_assignment = memory_internal::ExtractOrT<
-      memory_internal::GetPropagateOnContainerMoveAssignment, Alloc,
-      std::false_type>;
-
-  // propagate_on_container_swap:
-  // Alloc::propagate_on_container_swap if present, otherwise std::false_type
-  using propagate_on_container_swap =
-      memory_internal::ExtractOrT<memory_internal::GetPropagateOnContainerSwap,
-                                  Alloc, std::false_type>;
-
-  // is_always_equal:
-  // Alloc::is_always_equal if present, otherwise std::is_empty<Alloc>::type
-  using is_always_equal =
-      memory_internal::ExtractOrT<memory_internal::GetIsAlwaysEqual, Alloc,
-                                  typename std::is_empty<Alloc>::type>;
-
-  // rebind_alloc:
-  // Alloc::rebind<T>::other if present, otherwise Alloc<T, Args> if this Alloc
-  // is Alloc<U, Args>
-  template <typename T>
-  using rebind_alloc = typename memory_internal::RebindAlloc<Alloc, T>::type;
-
-  // rebind_traits:
-  // absl::allocator_traits<rebind_alloc<T>>
-  template <typename T>
-  using rebind_traits = absl::allocator_traits<rebind_alloc<T>>;
-
-  // allocate(Alloc& a, size_type n):
-  // Calls a.allocate(n)
-  static pointer allocate(Alloc& a,  // NOLINT(runtime/references)
-                          size_type n) {
-    return a.allocate(n);
-  }
-
-  // allocate(Alloc& a, size_type n, const_void_pointer hint):
-  // Calls a.allocate(n, hint) if possible.
-  // If not possible, calls a.allocate(n)
-  static pointer allocate(Alloc& a, size_type n,  // NOLINT(runtime/references)
-                          const_void_pointer hint) {
-    return allocate_impl(0, a, n, hint);
-  }
-
-  // deallocate(Alloc& a, pointer p, size_type n):
-  // Calls a.deallocate(p, n)
-  static void deallocate(Alloc& a, pointer p,  // NOLINT(runtime/references)
-                         size_type n) {
-    a.deallocate(p, n);
-  }
-
-  // construct(Alloc& a, T* p, Args&&... args):
-  // Calls a.construct(p, std::forward<Args>(args)...) if possible.
-  // If not possible, calls
-  //   ::new (static_cast<void*>(p)) T(std::forward<Args>(args)...)
-  template <typename T, typename... Args>
-  static void construct(Alloc& a, T* p,  // NOLINT(runtime/references)
-                        Args&&... args) {
-    construct_impl(0, a, p, std::forward<Args>(args)...);
-  }
-
-  // destroy(Alloc& a, T* p):
-  // Calls a.destroy(p) if possible. If not possible, calls p->~T().
-  template <typename T>
-  static void destroy(Alloc& a, T* p) {  // NOLINT(runtime/references)
-    destroy_impl(0, a, p);
-  }
-
-  // max_size(const Alloc& a):
-  // Returns a.max_size() if possible. If not possible, returns
-  //   std::numeric_limits<size_type>::max() / sizeof(value_type)
-  static size_type max_size(const Alloc& a) { return max_size_impl(0, a); }
-
-  // select_on_container_copy_construction(const Alloc& a):
-  // Returns a.select_on_container_copy_construction() if possible.
-  // If not possible, returns a.
-  static Alloc select_on_container_copy_construction(const Alloc& a) {
-    return select_on_container_copy_construction_impl(0, a);
-  }
-
- private:
-  template <typename A>
-  static auto allocate_impl(int, A& a,  // NOLINT(runtime/references)
-                            size_type n, const_void_pointer hint)
-      -> decltype(a.allocate(n, hint)) {
-    return a.allocate(n, hint);
-  }
-  static pointer allocate_impl(char, Alloc& a,  // NOLINT(runtime/references)
-                               size_type n, const_void_pointer) {
-    return a.allocate(n);
-  }
-
-  template <typename A, typename... Args>
-  static auto construct_impl(int, A& a,  // NOLINT(runtime/references)
-                             Args&&... args)
-      -> decltype(a.construct(std::forward<Args>(args)...)) {
-    a.construct(std::forward<Args>(args)...);
-  }
-
-  template <typename T, typename... Args>
-  static void construct_impl(char, Alloc&, T* p, Args&&... args) {
-    ::new (static_cast<void*>(p)) T(std::forward<Args>(args)...);
-  }
-
-  template <typename A, typename T>
-  static auto destroy_impl(int, A& a,  // NOLINT(runtime/references)
-                           T* p) -> decltype(a.destroy(p)) {
-    a.destroy(p);
-  }
-  template <typename T>
-  static void destroy_impl(char, Alloc&, T* p) {
-    p->~T();
-  }
-
-  template <typename A>
-  static auto max_size_impl(int, const A& a) -> decltype(a.max_size()) {
-    return a.max_size();
-  }
-  static size_type max_size_impl(char, const Alloc&) {
-    return (std::numeric_limits<size_type>::max)() / sizeof(value_type);
-  }
-
-  template <typename A>
-  static auto select_on_container_copy_construction_impl(int, const A& a)
-      -> decltype(a.select_on_container_copy_construction()) {
-    return a.select_on_container_copy_construction();
-  }
-  static Alloc select_on_container_copy_construction_impl(char,
-                                                          const Alloc& a) {
-    return a;
-  }
-};
-#endif  // __cplusplus >= 201703L
-
-namespace memory_internal {
-
 // This template alias transforms Alloc::is_nothrow into a metafunction with
 // Alloc as a parameter so it can be used with ExtractOrT<>.
 template <typename Alloc>
diff --git a/abseil-cpp/absl/memory/memory_exception_safety_test.cc b/abseil-cpp/absl/memory/memory_exception_safety_test.cc
deleted file mode 100644
index 1df7261..0000000
--- a/abseil-cpp/absl/memory/memory_exception_safety_test.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/memory/memory.h"
-
-#include "absl/base/config.h"
-
-#ifdef ABSL_HAVE_EXCEPTIONS
-
-#include "gtest/gtest.h"
-#include "absl/base/internal/exception_safety_testing.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace {
-
-constexpr int kLength = 50;
-using Thrower = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>;
-
-TEST(MakeUnique, CheckForLeaks) {
-  constexpr int kValue = 321;
-  auto tester = testing::MakeExceptionSafetyTester()
-                    .WithInitialValue(Thrower(kValue))
-                    // Ensures make_unique does not modify the input. The real
-                    // test, though, is ConstructorTracker checking for leaks.
-                    .WithContracts(testing::strong_guarantee);
-
-  EXPECT_TRUE(tester.Test([](Thrower* thrower) {
-    static_cast<void>(absl::make_unique<Thrower>(*thrower));
-  }));
-
-  EXPECT_TRUE(tester.Test([](Thrower* thrower) {
-    static_cast<void>(absl::make_unique<Thrower>(std::move(*thrower)));
-  }));
-
-  // Test T[n] overload
-  EXPECT_TRUE(tester.Test([&](Thrower*) {
-    static_cast<void>(absl::make_unique<Thrower[]>(kLength));
-  }));
-}
-
-}  // namespace
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_HAVE_EXCEPTIONS
diff --git a/abseil-cpp/absl/memory/memory_test.cc b/abseil-cpp/absl/memory/memory_test.cc
index 1990c7b..fafd3a4 100644
--- a/abseil-cpp/absl/memory/memory_test.cc
+++ b/abseil-cpp/absl/memory/memory_test.cc
@@ -63,12 +63,6 @@
   }
   EXPECT_EQ(0, DestructorVerifier::instance_count());
 }
-TEST(MakeUniqueTest, Basic) {
-  std::unique_ptr<std::string> p = absl::make_unique<std::string>();
-  EXPECT_EQ("", *p);
-  p = absl::make_unique<std::string>("hi");
-  EXPECT_EQ("hi", *p);
-}
 
 // InitializationVerifier fills in a pattern when allocated so we can
 // distinguish between its default and value initialized states (without
@@ -93,65 +87,6 @@
   int b;
 };
 
-TEST(Initialization, MakeUnique) {
-  auto p = absl::make_unique<InitializationVerifier>();
-
-  EXPECT_EQ(0, p->a);
-  EXPECT_EQ(0, p->b);
-}
-
-TEST(Initialization, MakeUniqueArray) {
-  auto p = absl::make_unique<InitializationVerifier[]>(2);
-
-  EXPECT_EQ(0, p[0].a);
-  EXPECT_EQ(0, p[0].b);
-  EXPECT_EQ(0, p[1].a);
-  EXPECT_EQ(0, p[1].b);
-}
-
-struct MoveOnly {
-  MoveOnly() = default;
-  explicit MoveOnly(int i1) : ip1{new int{i1}} {}
-  MoveOnly(int i1, int i2) : ip1{new int{i1}}, ip2{new int{i2}} {}
-  std::unique_ptr<int> ip1;
-  std::unique_ptr<int> ip2;
-};
-
-struct AcceptMoveOnly {
-  explicit AcceptMoveOnly(MoveOnly m) : m_(std::move(m)) {}
-  MoveOnly m_;
-};
-
-TEST(MakeUniqueTest, MoveOnlyTypeAndValue) {
-  using ExpectedType = std::unique_ptr<MoveOnly>;
-  {
-    auto p = absl::make_unique<MoveOnly>();
-    static_assert(std::is_same<decltype(p), ExpectedType>::value,
-                  "unexpected return type");
-    EXPECT_TRUE(!p->ip1);
-    EXPECT_TRUE(!p->ip2);
-  }
-  {
-    auto p = absl::make_unique<MoveOnly>(1);
-    static_assert(std::is_same<decltype(p), ExpectedType>::value,
-                  "unexpected return type");
-    EXPECT_TRUE(p->ip1 && *p->ip1 == 1);
-    EXPECT_TRUE(!p->ip2);
-  }
-  {
-    auto p = absl::make_unique<MoveOnly>(1, 2);
-    static_assert(std::is_same<decltype(p), ExpectedType>::value,
-                  "unexpected return type");
-    EXPECT_TRUE(p->ip1 && *p->ip1 == 1);
-    EXPECT_TRUE(p->ip2 && *p->ip2 == 2);
-  }
-}
-
-TEST(MakeUniqueTest, AcceptMoveOnly) {
-  auto p = absl::make_unique<AcceptMoveOnly>(MoveOnly());
-  p = std::unique_ptr<AcceptMoveOnly>(new AcceptMoveOnly(MoveOnly()));
-}
-
 struct ArrayWatch {
   void* operator new[](size_t n) {
     allocs().push_back(n);
@@ -164,38 +99,6 @@
   }
 };
 
-TEST(Make_UniqueTest, Array) {
-  // Ensure state is clean before we start so that these tests
-  // are order-agnostic.
-  ArrayWatch::allocs().clear();
-
-  auto p = absl::make_unique<ArrayWatch[]>(5);
-  static_assert(std::is_same<decltype(p), std::unique_ptr<ArrayWatch[]>>::value,
-                "unexpected return type");
-  EXPECT_THAT(ArrayWatch::allocs(), ElementsAre(5 * sizeof(ArrayWatch)));
-}
-
-TEST(Make_UniqueTest, NotAmbiguousWithStdMakeUnique) {
-  // Ensure that absl::make_unique is not ambiguous with std::make_unique.
-  // In C++14 mode, the below call to make_unique has both types as candidates.
-  struct TakesStdType {
-    explicit TakesStdType(const std::vector<int>& vec) {}
-  };
-  using absl::make_unique;
-  (void)make_unique<TakesStdType>(std::vector<int>());
-}
-
-#if 0
-// These tests shouldn't compile.
-TEST(MakeUniqueTestNC, AcceptMoveOnlyLvalue) {
-  auto m = MoveOnly();
-  auto p = absl::make_unique<AcceptMoveOnly>(m);
-}
-TEST(MakeUniqueTestNC, KnownBoundArray) {
-  auto p = absl::make_unique<ArrayWatch[5]>();
-}
-#endif
-
 TEST(RawPtrTest, RawPointer) {
   int i = 5;
   EXPECT_EQ(&i, absl::RawPtr(&i));
@@ -287,337 +190,6 @@
 }
 */
 
-template <typename T>
-struct SmartPointer {
-  using difference_type = char;
-};
-
-struct PointerWith {
-  using element_type = int32_t;
-  using difference_type = int16_t;
-  template <typename U>
-  using rebind = SmartPointer<U>;
-
-  static PointerWith pointer_to(
-      element_type& r) {  // NOLINT(runtime/references)
-    return PointerWith{&r};
-  }
-
-  element_type* ptr;
-};
-
-template <typename... Args>
-struct PointerWithout {};
-
-TEST(PointerTraits, Types) {
-  using TraitsWith = absl::pointer_traits<PointerWith>;
-  EXPECT_TRUE((std::is_same<TraitsWith::pointer, PointerWith>::value));
-  EXPECT_TRUE((std::is_same<TraitsWith::element_type, int32_t>::value));
-  EXPECT_TRUE((std::is_same<TraitsWith::difference_type, int16_t>::value));
-  EXPECT_TRUE((
-      std::is_same<TraitsWith::rebind<int64_t>, SmartPointer<int64_t>>::value));
-
-  using TraitsWithout = absl::pointer_traits<PointerWithout<double, int>>;
-  EXPECT_TRUE((std::is_same<TraitsWithout::pointer,
-                            PointerWithout<double, int>>::value));
-  EXPECT_TRUE((std::is_same<TraitsWithout::element_type, double>::value));
-  EXPECT_TRUE(
-      (std::is_same<TraitsWithout ::difference_type, std::ptrdiff_t>::value));
-  EXPECT_TRUE((std::is_same<TraitsWithout::rebind<int64_t>,
-                            PointerWithout<int64_t, int>>::value));
-
-  using TraitsRawPtr = absl::pointer_traits<char*>;
-  EXPECT_TRUE((std::is_same<TraitsRawPtr::pointer, char*>::value));
-  EXPECT_TRUE((std::is_same<TraitsRawPtr::element_type, char>::value));
-  EXPECT_TRUE(
-      (std::is_same<TraitsRawPtr::difference_type, std::ptrdiff_t>::value));
-  EXPECT_TRUE((std::is_same<TraitsRawPtr::rebind<int64_t>, int64_t*>::value));
-}
-
-TEST(PointerTraits, Functions) {
-  int i;
-  EXPECT_EQ(&i, absl::pointer_traits<PointerWith>::pointer_to(i).ptr);
-  EXPECT_EQ(&i, absl::pointer_traits<int*>::pointer_to(i));
-}
-
-TEST(AllocatorTraits, Typedefs) {
-  struct A {
-    struct value_type {};
-  };
-  EXPECT_TRUE((
-      std::is_same<A,
-                   typename absl::allocator_traits<A>::allocator_type>::value));
-  EXPECT_TRUE(
-      (std::is_same<A::value_type,
-                    typename absl::allocator_traits<A>::value_type>::value));
-
-  struct X {};
-  struct HasPointer {
-    using value_type = X;
-    using pointer = SmartPointer<X>;
-  };
-  EXPECT_TRUE((std::is_same<SmartPointer<X>, typename absl::allocator_traits<
-                                                 HasPointer>::pointer>::value));
-  EXPECT_TRUE(
-      (std::is_same<A::value_type*,
-                    typename absl::allocator_traits<A>::pointer>::value));
-
-  EXPECT_TRUE(
-      (std::is_same<
-          SmartPointer<const X>,
-          typename absl::allocator_traits<HasPointer>::const_pointer>::value));
-  EXPECT_TRUE(
-      (std::is_same<const A::value_type*,
-                    typename absl::allocator_traits<A>::const_pointer>::value));
-
-  struct HasVoidPointer {
-    using value_type = X;
-    struct void_pointer {};
-  };
-
-  EXPECT_TRUE((std::is_same<HasVoidPointer::void_pointer,
-                            typename absl::allocator_traits<
-                                HasVoidPointer>::void_pointer>::value));
-  EXPECT_TRUE(
-      (std::is_same<SmartPointer<void>, typename absl::allocator_traits<
-                                            HasPointer>::void_pointer>::value));
-
-  struct HasConstVoidPointer {
-    using value_type = X;
-    struct const_void_pointer {};
-  };
-
-  EXPECT_TRUE(
-      (std::is_same<HasConstVoidPointer::const_void_pointer,
-                    typename absl::allocator_traits<
-                        HasConstVoidPointer>::const_void_pointer>::value));
-  EXPECT_TRUE((std::is_same<SmartPointer<const void>,
-                            typename absl::allocator_traits<
-                                HasPointer>::const_void_pointer>::value));
-
-  struct HasDifferenceType {
-    using value_type = X;
-    using difference_type = int;
-  };
-  EXPECT_TRUE(
-      (std::is_same<int, typename absl::allocator_traits<
-                             HasDifferenceType>::difference_type>::value));
-  EXPECT_TRUE((std::is_same<char, typename absl::allocator_traits<
-                                      HasPointer>::difference_type>::value));
-
-  struct HasSizeType {
-    using value_type = X;
-    using size_type = unsigned int;
-  };
-  EXPECT_TRUE((std::is_same<unsigned int, typename absl::allocator_traits<
-                                              HasSizeType>::size_type>::value));
-  EXPECT_TRUE((std::is_same<unsigned char, typename absl::allocator_traits<
-                                               HasPointer>::size_type>::value));
-
-  struct HasPropagateOnCopy {
-    using value_type = X;
-    struct propagate_on_container_copy_assignment {};
-  };
-
-  EXPECT_TRUE(
-      (std::is_same<HasPropagateOnCopy::propagate_on_container_copy_assignment,
-                    typename absl::allocator_traits<HasPropagateOnCopy>::
-                        propagate_on_container_copy_assignment>::value));
-  EXPECT_TRUE(
-      (std::is_same<std::false_type,
-                    typename absl::allocator_traits<
-                        A>::propagate_on_container_copy_assignment>::value));
-
-  struct HasPropagateOnMove {
-    using value_type = X;
-    struct propagate_on_container_move_assignment {};
-  };
-
-  EXPECT_TRUE(
-      (std::is_same<HasPropagateOnMove::propagate_on_container_move_assignment,
-                    typename absl::allocator_traits<HasPropagateOnMove>::
-                        propagate_on_container_move_assignment>::value));
-  EXPECT_TRUE(
-      (std::is_same<std::false_type,
-                    typename absl::allocator_traits<
-                        A>::propagate_on_container_move_assignment>::value));
-
-  struct HasPropagateOnSwap {
-    using value_type = X;
-    struct propagate_on_container_swap {};
-  };
-
-  EXPECT_TRUE(
-      (std::is_same<HasPropagateOnSwap::propagate_on_container_swap,
-                    typename absl::allocator_traits<HasPropagateOnSwap>::
-                        propagate_on_container_swap>::value));
-  EXPECT_TRUE(
-      (std::is_same<std::false_type, typename absl::allocator_traits<A>::
-                                         propagate_on_container_swap>::value));
-
-  struct HasIsAlwaysEqual {
-    using value_type = X;
-    struct is_always_equal {};
-  };
-
-  EXPECT_TRUE((std::is_same<HasIsAlwaysEqual::is_always_equal,
-                            typename absl::allocator_traits<
-                                HasIsAlwaysEqual>::is_always_equal>::value));
-  EXPECT_TRUE((std::is_same<std::true_type, typename absl::allocator_traits<
-                                                A>::is_always_equal>::value));
-  struct NonEmpty {
-    using value_type = X;
-    int i;
-  };
-  EXPECT_TRUE(
-      (std::is_same<std::false_type,
-                    absl::allocator_traits<NonEmpty>::is_always_equal>::value));
-}
-
-template <typename T>
-struct AllocWithPrivateInheritance : private std::allocator<T> {
-  using value_type = T;
-};
-
-TEST(AllocatorTraits, RebindWithPrivateInheritance) {
-  // Regression test for some versions of gcc that do not like the sfinae we
-  // used in combination with private inheritance.
-  EXPECT_TRUE(
-      (std::is_same<AllocWithPrivateInheritance<int>,
-                    absl::allocator_traits<AllocWithPrivateInheritance<char>>::
-                        rebind_alloc<int>>::value));
-}
-
-template <typename T>
-struct Rebound {};
-
-struct AllocWithRebind {
-  using value_type = int;
-  template <typename T>
-  struct rebind {
-    using other = Rebound<T>;
-  };
-};
-
-template <typename T, typename U>
-struct AllocWithoutRebind {
-  using value_type = int;
-};
-
-TEST(AllocatorTraits, Rebind) {
-  EXPECT_TRUE(
-      (std::is_same<Rebound<int>,
-                    typename absl::allocator_traits<
-                        AllocWithRebind>::template rebind_alloc<int>>::value));
-  EXPECT_TRUE(
-      (std::is_same<absl::allocator_traits<Rebound<int>>,
-                    typename absl::allocator_traits<
-                        AllocWithRebind>::template rebind_traits<int>>::value));
-
-  EXPECT_TRUE(
-      (std::is_same<AllocWithoutRebind<double, char>,
-                    typename absl::allocator_traits<AllocWithoutRebind<
-                        int, char>>::template rebind_alloc<double>>::value));
-  EXPECT_TRUE(
-      (std::is_same<absl::allocator_traits<AllocWithoutRebind<double, char>>,
-                    typename absl::allocator_traits<AllocWithoutRebind<
-                        int, char>>::template rebind_traits<double>>::value));
-}
-
-struct TestValue {
-  TestValue() {}
-  explicit TestValue(int* trace) : trace(trace) { ++*trace; }
-  ~TestValue() {
-    if (trace) --*trace;
-  }
-  int* trace = nullptr;
-};
-
-struct MinimalMockAllocator {
-  MinimalMockAllocator() : value(0) {}
-  explicit MinimalMockAllocator(int value) : value(value) {}
-  MinimalMockAllocator(const MinimalMockAllocator& other)
-      : value(other.value) {}
-  using value_type = TestValue;
-  MOCK_METHOD(value_type*, allocate, (size_t));
-  MOCK_METHOD(void, deallocate, (value_type*, size_t));
-
-  int value;
-};
-
-TEST(AllocatorTraits, FunctionsMinimal) {
-  int trace = 0;
-  int hint;
-  TestValue x(&trace);
-  MinimalMockAllocator mock;
-  using Traits = absl::allocator_traits<MinimalMockAllocator>;
-  EXPECT_CALL(mock, allocate(7)).WillRepeatedly(Return(&x));
-  EXPECT_CALL(mock, deallocate(&x, 7));
-
-  EXPECT_EQ(&x, Traits::allocate(mock, 7));
-  static_cast<void>(Traits::allocate(mock, 7, static_cast<const void*>(&hint)));
-  EXPECT_EQ(&x, Traits::allocate(mock, 7, static_cast<const void*>(&hint)));
-  Traits::deallocate(mock, &x, 7);
-
-  EXPECT_EQ(1, trace);
-  Traits::construct(mock, &x, &trace);
-  EXPECT_EQ(2, trace);
-  Traits::destroy(mock, &x);
-  EXPECT_EQ(1, trace);
-
-  EXPECT_EQ(std::numeric_limits<size_t>::max() / sizeof(TestValue),
-            Traits::max_size(mock));
-
-  EXPECT_EQ(0, mock.value);
-  EXPECT_EQ(0, Traits::select_on_container_copy_construction(mock).value);
-}
-
-struct FullMockAllocator {
-  FullMockAllocator() : value(0) {}
-  explicit FullMockAllocator(int value) : value(value) {}
-  FullMockAllocator(const FullMockAllocator& other) : value(other.value) {}
-  using value_type = TestValue;
-  MOCK_METHOD(value_type*, allocate, (size_t));
-  MOCK_METHOD(value_type*, allocate, (size_t, const void*));
-  MOCK_METHOD(void, construct, (value_type*, int*));
-  MOCK_METHOD(void, destroy, (value_type*));
-  MOCK_METHOD(size_t, max_size, (),
-              (const));
-  MOCK_METHOD(FullMockAllocator, select_on_container_copy_construction, (),
-              (const));
-
-  int value;
-};
-
-TEST(AllocatorTraits, FunctionsFull) {
-  int trace = 0;
-  int hint;
-  TestValue x(&trace), y;
-  FullMockAllocator mock;
-  using Traits = absl::allocator_traits<FullMockAllocator>;
-  EXPECT_CALL(mock, allocate(7)).WillRepeatedly(Return(&x));
-  EXPECT_CALL(mock, allocate(13, &hint)).WillRepeatedly(Return(&y));
-  EXPECT_CALL(mock, construct(&x, &trace));
-  EXPECT_CALL(mock, destroy(&x));
-  EXPECT_CALL(mock, max_size()).WillRepeatedly(Return(17));
-  EXPECT_CALL(mock, select_on_container_copy_construction())
-      .WillRepeatedly(Return(FullMockAllocator(23)));
-
-  EXPECT_EQ(&x, Traits::allocate(mock, 7));
-  EXPECT_EQ(&y, Traits::allocate(mock, 13, static_cast<const void*>(&hint)));
-
-  EXPECT_EQ(1, trace);
-  Traits::construct(mock, &x, &trace);
-  EXPECT_EQ(1, trace);
-  Traits::destroy(mock, &x);
-  EXPECT_EQ(1, trace);
-
-  EXPECT_EQ(17, Traits::max_size(mock));
-
-  EXPECT_EQ(0, mock.value);
-  EXPECT_EQ(23, Traits::select_on_container_copy_construction(mock).value);
-}
-
 TEST(AllocatorNoThrowTest, DefaultAllocator) {
 #if defined(ABSL_ALLOCATOR_NOTHROW) && ABSL_ALLOCATOR_NOTHROW
   EXPECT_TRUE(absl::default_allocator_is_nothrow::value);
diff --git a/abseil-cpp/absl/meta/BUILD.bazel b/abseil-cpp/absl/meta/BUILD.bazel
index 5585fcc..13051d8 100644
--- a/abseil-cpp/absl/meta/BUILD.bazel
+++ b/abseil-cpp/absl/meta/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -33,6 +32,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:config",
+        "//absl/base:core_headers",
     ],
 )
 
@@ -43,6 +43,9 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":type_traits",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/time",
         "@com_google_googletest//:gtest_main",
     ],
 )
diff --git a/abseil-cpp/absl/meta/CMakeLists.txt b/abseil-cpp/absl/meta/CMakeLists.txt
index 672ead2..d509114 100644
--- a/abseil-cpp/absl/meta/CMakeLists.txt
+++ b/abseil-cpp/absl/meta/CMakeLists.txt
@@ -23,6 +23,7 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::config
+    absl::core_headers
   PUBLIC
 )
 
@@ -34,8 +35,11 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::config
+    absl::time
+    absl::core_headers
     absl::type_traits
-    gmock_main
+    GTest::gmock_main
 )
 
 # component target
diff --git a/abseil-cpp/absl/meta/type_traits.h b/abseil-cpp/absl/meta/type_traits.h
index 75689bb..cf71164 100644
--- a/abseil-cpp/absl/meta/type_traits.h
+++ b/abseil-cpp/absl/meta/type_traits.h
@@ -35,89 +35,31 @@
 #ifndef ABSL_META_TYPE_TRAITS_H_
 #define ABSL_META_TYPE_TRAITS_H_
 
-#include <stddef.h>
+#include <cstddef>
 #include <functional>
 #include <type_traits>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 
-// MSVC constructibility traits do not detect destructor properties and so our
-// implementations should not use them as a source-of-truth.
-#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
-#define ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
-#endif
+// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
+// feature.
+#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__
+#else  // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t)
+#endif  // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
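A brief illustration of the new macro (hypothetical `NeedsOverAlignedNew`, not an Abseil API): a default-new-alignment constant like this is typically compared against a type's alignment to decide whether over-aligned allocation is required.

    #include <cstddef>
    #include <type_traits>

    #include "absl/meta/type_traits.h"

    // True when plain `operator new` cannot be assumed to return storage that
    // is aligned strictly enough for T (hypothetical trait, illustration only).
    template <typename T>
    struct NeedsOverAlignedNew
        : std::integral_constant<
              bool, (alignof(T) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> {};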
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-// Defined and documented later on in this file.
-template <typename T>
-struct is_trivially_destructible;
-
-// Defined and documented later on in this file.
-template <typename T>
-struct is_trivially_move_assignable;
-
 namespace type_traits_internal {
 
-// Silence MSVC warnings about the destructor being defined as deleted.
-#if defined(_MSC_VER) && !defined(__GNUC__)
-#pragma warning(push)
-#pragma warning(disable : 4624)
-#endif  // defined(_MSC_VER) && !defined(__GNUC__)
-
-template <class T>
-union SingleMemberUnion {
-  T t;
-};
-
-// Restore the state of the destructor warning that was silenced above.
-#if defined(_MSC_VER) && !defined(__GNUC__)
-#pragma warning(pop)
-#endif  // defined(_MSC_VER) && !defined(__GNUC__)
-
-template <class T>
-struct IsTriviallyMoveConstructibleObject
-    : std::integral_constant<
-          bool, std::is_move_constructible<
-                    type_traits_internal::SingleMemberUnion<T>>::value &&
-                    absl::is_trivially_destructible<T>::value> {};
-
-template <class T>
-struct IsTriviallyCopyConstructibleObject
-    : std::integral_constant<
-          bool, std::is_copy_constructible<
-                    type_traits_internal::SingleMemberUnion<T>>::value &&
-                    absl::is_trivially_destructible<T>::value> {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference : std::false_type {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference<T&>
-    : absl::is_trivially_move_assignable<T>::type {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference<T&&>
-    : absl::is_trivially_move_assignable<T>::type {};
-
 template <typename... Ts>
 struct VoidTImpl {
   using type = void;
 };
 
-// This trick to retrieve a default alignment is necessary for our
-// implementation of aligned_storage_t to be consistent with any implementation
-// of std::aligned_storage.
-template <size_t Len, typename T = std::aligned_storage<Len>>
-struct default_alignment_of_aligned_storage;
-
-template <size_t Len, size_t Align>
-struct default_alignment_of_aligned_storage<Len,
-                                            std::aligned_storage<Len, Align>> {
-  static constexpr size_t value = Align;
-};
-
 ////////////////////////////////
 // Library Fundamentals V2 TS //
 ////////////////////////////////
@@ -161,39 +103,8 @@
 struct is_detected_convertible
     : is_detected_convertible_impl<void, To, Op, Args...>::type {};
 
-template <typename T>
-using IsCopyAssignableImpl =
-    decltype(std::declval<T&>() = std::declval<const T&>());
-
-template <typename T>
-using IsMoveAssignableImpl = decltype(std::declval<T&>() = std::declval<T&&>());
-
 }  // namespace type_traits_internal
 
-// MSVC 19.20 has a regression that causes our workarounds to fail, but their
-// std forms now appear to be compliant.
-#if defined(_MSC_VER) && !defined(__clang__) && (_MSC_VER >= 1920)
-
-template <typename T>
-using is_copy_assignable = std::is_copy_assignable<T>;
-
-template <typename T>
-using is_move_assignable = std::is_move_assignable<T>;
-
-#else
-
-template <typename T>
-struct is_copy_assignable : type_traits_internal::is_detected<
-                                type_traits_internal::IsCopyAssignableImpl, T> {
-};
-
-template <typename T>
-struct is_move_assignable : type_traits_internal::is_detected<
-                                type_traits_internal::IsMoveAssignableImpl, T> {
-};
-
-#endif
-
 // void_t()
 //
 // Ignores the type of any its arguments and returns `void`. In general, this
@@ -274,273 +185,50 @@
           bool, !(std::is_reference<T>::value ||
                   std::is_const<typename std::add_const<T>::type>::value)> {};
 
+// is_copy_assignable()
+// is_move_assignable()
 // is_trivially_destructible()
-//
-// Determines whether the passed type `T` is trivially destructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_destructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: the extensions (__has_trivial_xxx) are implemented in gcc (version >=
-// 4.3) and clang. Since we are supporting libstdc++ > 4.7, they should always
-// be present. These  extensions are documented at
-// https://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html#Type-Traits.
-template <typename T>
-struct is_trivially_destructible
-    : std::integral_constant<bool, __has_trivial_destructor(T) &&
-                                   std::is_destructible<T>::value> {
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
- private:
-  static constexpr bool compliant = std::is_trivially_destructible<T>::value ==
-                                    is_trivially_destructible::value;
-  static_assert(compliant || std::is_trivially_destructible<T>::value,
-                "Not compliant with std::is_trivially_destructible; "
-                "Standard: false, Implementation: true");
-  static_assert(compliant || !std::is_trivially_destructible<T>::value,
-                "Not compliant with std::is_trivially_destructible; "
-                "Standard: true, Implementation: false");
-#endif  // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
-};
-
 // is_trivially_default_constructible()
-//
-// Determines whether the passed type `T` is trivially default constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_default_constructible()` metafunction for platforms that
-// have incomplete C++11 support (such as libstdc++ 4.x). On any platforms that
-// do fully support C++11, we check whether this yields the same result as the
-// std implementation.
-//
-// NOTE: according to the C++ standard, Section: 20.15.4.3 [meta.unary.prop]
-// "The predicate condition for a template specialization is_constructible<T,
-// Args...> shall be satisfied if and only if the following variable
-// definition would be well-formed for some invented variable t:
-//
-// T t(declval<Args>()...);
-//
-// is_trivially_constructible<T, Args...> additionally requires that the
-// variable definition does not call any operation that is not trivial.
-// For the purposes of this check, the call to std::declval is considered
-// trivial."
-//
-// Notes from https://en.cppreference.com/w/cpp/types/is_constructible:
-// In many implementations, is_nothrow_constructible also checks if the
-// destructor throws because it is effectively noexcept(T(arg)). Same
-// applies to is_trivially_constructible, which, in these implementations, also
-// requires that the destructor is trivial.
-// GCC bug 51452: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452
-// LWG issue 2116: http://cplusplus.github.io/LWG/lwg-active.html#2116.
-//
-// "T obj();" need to be well-formed and not call any nontrivial operation.
-// Nontrivially destructible types will cause the expression to be nontrivial.
-template <typename T>
-struct is_trivially_default_constructible
-    : std::integral_constant<bool, __has_trivial_constructor(T) &&
-                                   std::is_default_constructible<T>::value &&
-                                   is_trivially_destructible<T>::value> {
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
-    !defined(                                            \
-        ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
-  static constexpr bool compliant =
-      std::is_trivially_default_constructible<T>::value ==
-      is_trivially_default_constructible::value;
-  static_assert(compliant || std::is_trivially_default_constructible<T>::value,
-                "Not compliant with std::is_trivially_default_constructible; "
-                "Standard: false, Implementation: true");
-  static_assert(compliant || !std::is_trivially_default_constructible<T>::value,
-                "Not compliant with std::is_trivially_default_constructible; "
-                "Standard: true, Implementation: false");
-#endif  // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
 // is_trivially_move_constructible()
-//
-// Determines whether the passed type `T` is trivially move constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_move_constructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `T obj(declval<T>());` needs to be well-formed and not call any
-// nontrivial operation.  Nontrivially destructible types will cause the
-// expression to be nontrivial.
-template <typename T>
-struct is_trivially_move_constructible
-    : std::conditional<
-          std::is_object<T>::value && !std::is_array<T>::value,
-          type_traits_internal::IsTriviallyMoveConstructibleObject<T>,
-          std::is_reference<T>>::type::type {
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
-    !defined(                                            \
-        ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
-  static constexpr bool compliant =
-      std::is_trivially_move_constructible<T>::value ==
-      is_trivially_move_constructible::value;
-  static_assert(compliant || std::is_trivially_move_constructible<T>::value,
-                "Not compliant with std::is_trivially_move_constructible; "
-                "Standard: false, Implementation: true");
-  static_assert(compliant || !std::is_trivially_move_constructible<T>::value,
-                "Not compliant with std::is_trivially_move_constructible; "
-                "Standard: true, Implementation: false");
-#endif  // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
 // is_trivially_copy_constructible()
-//
-// Determines whether the passed type `T` is trivially copy constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copy_constructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `T obj(declval<const T&>());` needs to be well-formed and not call any
-// nontrivial operation.  Nontrivially destructible types will cause the
-// expression to be nontrivial.
-template <typename T>
-struct is_trivially_copy_constructible
-    : std::conditional<
-          std::is_object<T>::value && !std::is_array<T>::value,
-          type_traits_internal::IsTriviallyCopyConstructibleObject<T>,
-          std::is_lvalue_reference<T>>::type::type {
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
-    !defined(                                            \
-        ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
-  static constexpr bool compliant =
-      std::is_trivially_copy_constructible<T>::value ==
-      is_trivially_copy_constructible::value;
-  static_assert(compliant || std::is_trivially_copy_constructible<T>::value,
-                "Not compliant with std::is_trivially_copy_constructible; "
-                "Standard: false, Implementation: true");
-  static_assert(compliant || !std::is_trivially_copy_constructible<T>::value,
-                "Not compliant with std::is_trivially_copy_constructible; "
-                "Standard: true, Implementation: false");
-#endif  // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
 // is_trivially_move_assignable()
-//
-// Determines whether the passed type `T` is trivially move assignable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_move_assignable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `is_assignable<T, U>::value` is `true` if the expression
-// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
-// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
-// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
-// `is_trivially_assignable<T&, T>`.
-template <typename T>
-struct is_trivially_move_assignable
-    : std::conditional<
-          std::is_object<T>::value && !std::is_array<T>::value &&
-              std::is_move_assignable<T>::value,
-          std::is_move_assignable<type_traits_internal::SingleMemberUnion<T>>,
-          type_traits_internal::IsTriviallyMoveAssignableReference<T>>::type::
-          type {
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- private:
-  static constexpr bool compliant =
-      std::is_trivially_move_assignable<T>::value ==
-      is_trivially_move_assignable::value;
-  static_assert(compliant || std::is_trivially_move_assignable<T>::value,
-                "Not compliant with std::is_trivially_move_assignable; "
-                "Standard: false, Implementation: true");
-  static_assert(compliant || !std::is_trivially_move_assignable<T>::value,
-                "Not compliant with std::is_trivially_move_assignable; "
-                "Standard: true, Implementation: false");
-#endif  // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
-};
-
 // is_trivially_copy_assignable()
 //
-// Determines whether the passed type `T` is trivially copy assignable.
+// Historical note: Abseil once provided implementations of these type traits
+// for platforms that lacked full support. New code should prefer to use the
+// std variants.
 //
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copy_assignable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `is_assignable<T, U>::value` is `true` if the expression
-// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
-// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
-// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
-// `is_trivially_assignable<T&, const T&>`.
-template <typename T>
-struct is_trivially_copy_assignable
-    : std::integral_constant<
-          bool, __has_trivial_assign(typename std::remove_reference<T>::type) &&
-                    absl::is_copy_assignable<T>::value> {
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- private:
-  static constexpr bool compliant =
-      std::is_trivially_copy_assignable<T>::value ==
-      is_trivially_copy_assignable::value;
-  static_assert(compliant || std::is_trivially_copy_assignable<T>::value,
-                "Not compliant with std::is_trivially_copy_assignable; "
-                "Standard: false, Implementation: true");
-  static_assert(compliant || !std::is_trivially_copy_assignable<T>::value,
-                "Not compliant with std::is_trivially_copy_assignable; "
-                "Standard: true, Implementation: false");
-#endif  // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
-};
+// See the documentation for the STL <type_traits> header for more information:
+// https://en.cppreference.com/w/cpp/header/type_traits
+using std::is_copy_assignable;
+using std::is_move_assignable;
+using std::is_trivially_copy_assignable;
+using std::is_trivially_copy_constructible;
+using std::is_trivially_default_constructible;
+using std::is_trivially_destructible;
+using std::is_trivially_move_assignable;
+using std::is_trivially_move_constructible;
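A minimal sketch of what the aliasing means for callers, assuming only `<type_traits>`, `<string>`, and this header:

    #include <string>
    #include <type_traits>

    #include "absl/meta/type_traits.h"

    // The absl spelling now names the std trait exactly.
    static_assert(std::is_same<absl::is_trivially_destructible<int>,
                               std::is_trivially_destructible<int>>::value,
                  "absl::is_trivially_destructible is an alias for the std trait");

    // Behavior is unchanged for the common cases.
    static_assert(absl::is_trivially_copy_constructible<int>::value, "");
    static_assert(!absl::is_trivially_copy_constructible<std::string>::value, "");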
 
-namespace type_traits_internal {
-// is_trivially_copyable()
-//
-// Determines whether the passed type `T` is trivially copyable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copyable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). We use the C++17 definition
-// of TriviallyCopyable.
-//
-// NOTE: `is_trivially_copyable<T>::value` is `true` if all of T's copy/move
-// constructors/assignment operators are trivial or deleted, T has at least
-// one non-deleted copy/move constructor/assignment operator, and T is trivially
-// destructible. Arrays of trivially copyable types are trivially copyable.
-//
-// We expose this metafunction only for internal use within absl.
+#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L
 template <typename T>
-class is_trivially_copyable_impl {
-  using ExtentsRemoved = typename std::remove_all_extents<T>::type;
-  static constexpr bool kIsCopyOrMoveConstructible =
-      std::is_copy_constructible<ExtentsRemoved>::value ||
-      std::is_move_constructible<ExtentsRemoved>::value;
-  static constexpr bool kIsCopyOrMoveAssignable =
-      absl::is_copy_assignable<ExtentsRemoved>::value ||
-      absl::is_move_assignable<ExtentsRemoved>::value;
+using remove_cvref = std::remove_cvref<T>;
 
- public:
-  static constexpr bool kValue =
-      (__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) &&
-      (__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) &&
-      (kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) &&
-      is_trivially_destructible<ExtentsRemoved>::value &&
-      // We need to check for this explicitly because otherwise we'll say
-      // references are trivial copyable when compiled by MSVC.
-      !std::is_reference<ExtentsRemoved>::value;
+template <typename T>
+using remove_cvref_t = typename std::remove_cvref<T>::type;
+#else
+// remove_cvref()
+//
+// C++11 compatible implementation of std::remove_cvref which was added in
+// C++20.
+template <typename T>
+struct remove_cvref {
+  using type =
+      typename std::remove_cv<typename std::remove_reference<T>::type>::type;
 };
 
 template <typename T>
-struct is_trivially_copyable
-    : std::integral_constant<
-          bool, type_traits_internal::is_trivially_copyable_impl<T>::kValue> {};
-}  // namespace type_traits_internal
+using remove_cvref_t = typename remove_cvref<T>::type;
+#endif
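A minimal sketch of the backported alias in use; nothing beyond this header and `<type_traits>` is assumed:

    #include <type_traits>

    #include "absl/meta/type_traits.h"

    // remove_cvref_t strips the reference first, then any cv-qualifiers,
    // matching C++20 std::remove_cvref_t on older toolchains.
    static_assert(std::is_same<absl::remove_cvref_t<const int&>, int>::value, "");
    static_assert(std::is_same<absl::remove_cvref_t<volatile int&&>, int>::value, "");
    static_assert(std::is_same<absl::remove_cvref_t<int[3]>, int[3]>::value, "");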
 
 // -----------------------------------------------------------------------------
 // C++14 "_t" trait aliases
@@ -591,9 +279,26 @@
 template <typename T>
 using remove_all_extents_t = typename std::remove_all_extents<T>::type;
 
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+namespace type_traits_internal {
+// This trick to retrieve a default alignment is necessary for our
+// implementation of aligned_storage_t to be consistent with any
+// implementation of std::aligned_storage.
+template <size_t Len, typename T = std::aligned_storage<Len>>
+struct default_alignment_of_aligned_storage;
+
+template <size_t Len, size_t Align>
+struct default_alignment_of_aligned_storage<
+    Len, std::aligned_storage<Len, Align>> {
+  static constexpr size_t value = Align;
+};
+}  // namespace type_traits_internal
+
+// TODO(b/260219225): std::aligned_storage(_t) is deprecated in C++23.
 template <size_t Len, size_t Align = type_traits_internal::
                           default_alignment_of_aligned_storage<Len>::value>
 using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
 template <typename T>
 using decay_t = typename std::decay<T>::type;
@@ -610,8 +315,23 @@
 template <typename T>
 using underlying_type_t = typename std::underlying_type<T>::type;
 
-template <typename T>
-using result_of_t = typename std::result_of<T>::type;
+
+namespace type_traits_internal {
+
+#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
+    (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+// std::result_of is deprecated (C++17) or removed (C++20)
+template<typename> struct result_of;
+template<typename F, typename... Args>
+struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
+#else
+template<typename F> using result_of = std::result_of<F>;
+#endif
+
+}  // namespace type_traits_internal
+
+template<typename F>
+using result_of_t = typename type_traits_internal::result_of<F>::type;
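A minimal sketch of the shim from a caller's point of view; `Accumulator` is a hypothetical functor used only for illustration:

    #include <type_traits>

    #include "absl/meta/type_traits.h"

    struct Accumulator {
      double operator()(double acc, int next) const { return acc + next; }
    };

    // The alias keeps the old std::result_of spelling, F(Args...), even on
    // toolchains where std::result_of itself has been removed.
    static_assert(
        std::is_same<absl::result_of_t<Accumulator(double, int)>, double>::value,
        "result_of_t<F(Args...)> yields the invocation's result type");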
 
 namespace type_traits_internal {
 // In MSVC we can't probe std::hash or stdext::hash because it triggers a
@@ -747,6 +467,97 @@
 using swap_internal::StdSwapIsUnconstrained;
 
 }  // namespace type_traits_internal
+
+// absl::is_trivially_relocatable<T>
+//
+// Detects whether a type is known to be "trivially relocatable" -- meaning it
+// can be relocated without invoking the constructor/destructor, using a form of
+// move elision.
+//
+// This trait is conservative, for backwards compatibility. If it's true then
+// the type is definitely trivially relocatable, but if it's false then the type
+// may or may not be.
+//
+// Example:
+//
+// if constexpr (absl::is_trivially_relocatable<T>::value) {
+//   memcpy(new_location, old_location, sizeof(T));
+// } else {
+//   new(new_location) T(std::move(*old_location));
+//   old_location->~T();
+// }
+//
+// Upstream documentation:
+//
+// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable
+
+// If the compiler offers a builtin that tells us the answer, we can use that.
+// This covers all of the cases in the fallback below, plus types that opt in
+// using e.g. [[clang::trivial_abi]].
+//
+// Clang on Windows has the builtin, but it falsely claims types with a
+// user-provided destructor are trivial (http://b/275003464). So we opt out
+// there.
+//
+// TODO(b/275003464): remove the opt-out once the bug is fixed.
+//
+// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
+// work with NVCC either.
+#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) &&                 \
+    !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \
+    !defined(__NVCC__)
+template <class T>
+struct is_trivially_relocatable
+    : std::integral_constant<bool, __is_trivially_relocatable(T)> {};
+#else
+// Otherwise we use a fallback that detects only those types we can feasibly
+// detect. Any type that has trivial move-construction and destruction
+// operations is by definition trivially relocatable.
+template <class T>
+struct is_trivially_relocatable
+    : absl::conjunction<absl::is_trivially_move_constructible<T>,
+                        absl::is_trivially_destructible<T>> {};
+#endif
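A minimal sketch of how the trait is typically consumed; `Relocate` is a hypothetical helper (not an Abseil API) that uses tag dispatch so the memcpy path is chosen only when the trait reports true:

    #include <cstring>
    #include <new>
    #include <type_traits>
    #include <utility>

    #include "absl/meta/type_traits.h"

    // Hypothetical helper: move the object at `src` into the raw storage at
    // `dst` and end the lifetime of `*src`.
    template <typename T>
    T* RelocateImpl(T* dst, T* src, std::true_type /*trivially_relocatable*/) {
      // The trait guarantees the bytes fully represent the object and that no
      // destructor needs to run on the source.
      std::memcpy(static_cast<void*>(dst), static_cast<const void*>(src),
                  sizeof(T));
      return dst;
    }

    template <typename T>
    T* RelocateImpl(T* dst, T* src, std::false_type /*trivially_relocatable*/) {
      T* result = ::new (static_cast<void*>(dst)) T(std::move(*src));
      src->~T();
      return result;
    }

    template <typename T>
    T* Relocate(T* dst, T* src) {
      return RelocateImpl(
          dst, src,
          std::integral_constant<bool,
                                 absl::is_trivially_relocatable<T>::value>());
    }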
+
+// absl::is_constant_evaluated()
+//
+// Detects whether the function call occurs within a constant-evaluated context.
+// Returns true if the evaluation of the call occurs within the evaluation of an
+// expression or conversion that is manifestly constant-evaluated; otherwise
+// returns false.
+//
+// This function is implemented in terms of `std::is_constant_evaluated` for
+// C++20 and up. For older C++ versions, the function is implemented in terms
+// of `__builtin_is_constant_evaluated` if available; otherwise the function
+// will fail to compile.
+//
+// Applications can inspect `ABSL_HAVE_CONSTANT_EVALUATED` at compile time
+// to check if this function is supported.
+//
+// Example:
+//
+// constexpr MyClass::MyClass(int param) {
+// #ifdef ABSL_HAVE_CONSTANT_EVALUATED
+//   if (!absl::is_constant_evaluated()) {
+//     ABSL_LOG(INFO) << "MyClass(" << param << ")";
+//   }
+// #endif  // ABSL_HAVE_CONSTANT_EVALUATED
+// }
+//
+// Upstream documentation:
+//
+// http://en.cppreference.com/w/cpp/types/is_constant_evaluated
+// http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#:~:text=__builtin_is_constant_evaluated
+//
+#if defined(ABSL_HAVE_CONSTANT_EVALUATED)
+constexpr bool is_constant_evaluated() noexcept {
+#ifdef __cpp_lib_is_constant_evaluated
+  return std::is_constant_evaluated();
+#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
+  return __builtin_is_constant_evaluated();
+#endif
+}
+#endif  // ABSL_HAVE_CONSTANT_EVALUATED
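A minimal sketch of the usual guard pattern; `ConstexprStrlen` is a hypothetical function (requires C++14 for the constexpr loop):

    #include <cstring>

    #include "absl/meta/type_traits.h"

    constexpr std::size_t ConstexprStrlen(const char* s) {
    #ifdef ABSL_HAVE_CONSTANT_EVALUATED
      if (!absl::is_constant_evaluated()) {
        return std::strlen(s);  // run time: defer to the optimized routine
      }
    #endif
      std::size_t n = 0;  // constant evaluation: portable constexpr loop
      while (s[n] != '\0') ++n;
      return n;
    }

    static_assert(ConstexprStrlen("abseil") == 6, "evaluated at compile time");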
 ABSL_NAMESPACE_END
 }  // namespace absl
 
diff --git a/abseil-cpp/absl/meta/type_traits_test.cc b/abseil-cpp/absl/meta/type_traits_test.cc
index 1aafd0d..7412f33 100644
--- a/abseil-cpp/absl/meta/type_traits_test.cc
+++ b/abseil-cpp/absl/meta/type_traits_test.cc
@@ -21,6 +21,10 @@
 #include <vector>
 
 #include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
 
 namespace {
 
@@ -336,6 +340,7 @@
 
 struct NonCopyableOrMovable {
   NonCopyableOrMovable() = default;
+  virtual ~NonCopyableOrMovable() = default;
   NonCopyableOrMovable(const NonCopyableOrMovable&) = delete;
   NonCopyableOrMovable(NonCopyableOrMovable&&) = delete;
   NonCopyableOrMovable& operator=(const NonCopyableOrMovable&) = delete;
@@ -347,29 +352,6 @@
   virtual ~Base() {}
 };
 
-// Old versions of libc++, around Clang 3.5 to 3.6, consider deleted destructors
-// as also being trivial. With the resolution of CWG 1928 and CWG 1734, this
-// is no longer considered true and has thus been amended.
-// Compiler Explorer: https://godbolt.org/g/zT59ZL
-// CWG issue 1734: http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1734
-// CWG issue 1928: http://open-std.org/JTC1/SC22/WG21/docs/cwg_closed.html#1928
-#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION >= 3700
-#define ABSL_TRIVIALLY_DESTRUCTIBLE_CONSIDER_DELETED_DESTRUCTOR_NOT_TRIVIAL 1
-#endif
-
-// As of the moment, GCC versions >5.1 have a problem compiling for
-// std::is_trivially_default_constructible<NontrivialDestructor[10]>, where
-// NontrivialDestructor is a struct with a custom nontrivial destructor. Note
-// that this problem only occurs for arrays of a known size, so something like
-// std::is_trivially_default_constructible<NontrivialDestructor[]> does not
-// have any problems.
-// Compiler Explorer: https://godbolt.org/g/dXRbdK
-// GCC bug 83689: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83689
-#if defined(__clang__) || defined(_MSC_VER) || \
-    (defined(__GNUC__) && __GNUC__ < 5)
-#define ABSL_GCC_BUG_TRIVIALLY_CONSTRUCTIBLE_ON_ARRAY_OF_NONTRIVIAL 1
-#endif
-
 TEST(TypeTraitsTest, TestIsFunction) {
   struct Callable {
     void operator()() {}
@@ -386,560 +368,32 @@
   EXPECT_FALSE(absl::is_function<Callable>::value);
 }
 
-TEST(TypeTraitsTest, TestTrivialDestructor) {
-  // Verify that arithmetic types and pointers have trivial destructors.
-  EXPECT_TRUE(absl::is_trivially_destructible<bool>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<char>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<unsigned char>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<signed char>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<wchar_t>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<int>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<unsigned int>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<int16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<uint16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<int64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<uint64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<float>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<double>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<long double>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<const std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<const Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<std::string**>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<Trivial**>::value);
-
-  // classes with destructors
-  EXPECT_TRUE(absl::is_trivially_destructible<Trivial>::value);
-  EXPECT_TRUE(absl::is_trivially_destructible<TrivialDestructor>::value);
-
-  // Verify that types with a nontrivial or deleted destructor
-  // are marked as such.
-  EXPECT_FALSE(absl::is_trivially_destructible<NontrivialDestructor>::value);
-#ifdef ABSL_TRIVIALLY_DESTRUCTIBLE_CONSIDER_DELETED_DESTRUCTOR_NOT_TRIVIAL
-  EXPECT_FALSE(absl::is_trivially_destructible<DeletedDestructor>::value);
-#endif
-
-  // simple_pair of such types is trivial
-  EXPECT_TRUE((absl::is_trivially_destructible<simple_pair<int, int>>::value));
-  EXPECT_TRUE((absl::is_trivially_destructible<
-               simple_pair<Trivial, TrivialDestructor>>::value));
-
-  // Verify that types without trivial destructors are correctly marked as such.
-  EXPECT_FALSE(absl::is_trivially_destructible<std::string>::value);
-  EXPECT_FALSE(absl::is_trivially_destructible<std::vector<int>>::value);
-
-  // Verify that simple_pairs of types without trivial destructors
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::is_trivially_destructible<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::is_trivially_destructible<
-                simple_pair<std::string, int>>::value));
-
-  // array of such types is trivial
-  using int10 = int[10];
-  EXPECT_TRUE(absl::is_trivially_destructible<int10>::value);
-  using Trivial10 = Trivial[10];
-  EXPECT_TRUE(absl::is_trivially_destructible<Trivial10>::value);
-  using TrivialDestructor10 = TrivialDestructor[10];
-  EXPECT_TRUE(absl::is_trivially_destructible<TrivialDestructor10>::value);
-
-  // Conversely, the opposite also holds.
-  using NontrivialDestructor10 = NontrivialDestructor[10];
-  EXPECT_FALSE(absl::is_trivially_destructible<NontrivialDestructor10>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialDefaultCtor) {
-  // arithmetic types and pointers have trivial default constructors.
-  EXPECT_TRUE(absl::is_trivially_default_constructible<bool>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<char>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<unsigned char>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<signed char>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<wchar_t>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<int>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<unsigned int>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<int16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<uint16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<int64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<uint64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<float>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<double>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<long double>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial*>::value);
+TEST(TypeTraitsTest, TestRemoveCVRef) {
   EXPECT_TRUE(
-      absl::is_trivially_default_constructible<const std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<const Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<std::string**>::value);
-  EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial**>::value);
-
-  // types with compiler generated default ctors
-  EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial>::value);
+      (std::is_same<typename absl::remove_cvref<int>::type, int>::value));
   EXPECT_TRUE(
-      absl::is_trivially_default_constructible<TrivialDefaultCtor>::value);
-
-  // Verify that types without them are not.
-  EXPECT_FALSE(
-      absl::is_trivially_default_constructible<NontrivialDefaultCtor>::value);
-  EXPECT_FALSE(
-      absl::is_trivially_default_constructible<DeletedDefaultCtor>::value);
-
-  // types with nontrivial destructor are nontrivial
-  EXPECT_FALSE(
-      absl::is_trivially_default_constructible<NontrivialDestructor>::value);
-
-  // types with vtables
-  EXPECT_FALSE(absl::is_trivially_default_constructible<Base>::value);
-
-  // Verify that simple_pair has trivial constructors where applicable.
-  EXPECT_TRUE((absl::is_trivially_default_constructible<
-               simple_pair<int, char*>>::value));
-  EXPECT_TRUE((absl::is_trivially_default_constructible<
-               simple_pair<int, Trivial>>::value));
-  EXPECT_TRUE((absl::is_trivially_default_constructible<
-               simple_pair<int, TrivialDefaultCtor>>::value));
-
-  // Verify that types without trivial constructors are
-  // correctly marked as such.
-  EXPECT_FALSE(absl::is_trivially_default_constructible<std::string>::value);
-  EXPECT_FALSE(
-      absl::is_trivially_default_constructible<std::vector<int>>::value);
-
-  // Verify that simple_pairs of types without trivial constructors
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::is_trivially_default_constructible<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::is_trivially_default_constructible<
-                simple_pair<std::string, int>>::value));
-
-  // Verify that arrays of such types are trivially default constructible
-  using int10 = int[10];
-  EXPECT_TRUE(absl::is_trivially_default_constructible<int10>::value);
-  using Trivial10 = Trivial[10];
-  EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial10>::value);
-  using TrivialDefaultCtor10 = TrivialDefaultCtor[10];
+      (std::is_same<typename absl::remove_cvref<int&>::type, int>::value));
   EXPECT_TRUE(
-      absl::is_trivially_default_constructible<TrivialDefaultCtor10>::value);
-
-  // Conversely, the opposite also holds.
-#ifdef ABSL_GCC_BUG_TRIVIALLY_CONSTRUCTIBLE_ON_ARRAY_OF_NONTRIVIAL
-  using NontrivialDefaultCtor10 = NontrivialDefaultCtor[10];
-  EXPECT_FALSE(
-      absl::is_trivially_default_constructible<NontrivialDefaultCtor10>::value);
-#endif
-}
-
-// GCC prior to 7.4 had a bug in its trivially-constructible traits
-// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80654).
-// This test makes sure that we do not depend on the trait in these cases when
-// implementing absl triviality traits.
-
-template <class T>
-struct BadConstructors {
-  BadConstructors() { static_assert(T::value, ""); }
-
-  BadConstructors(BadConstructors&&) { static_assert(T::value, ""); }
-
-  BadConstructors(const BadConstructors&) { static_assert(T::value, ""); }
-};
-
-TEST(TypeTraitsTest, TestTrivialityBadConstructors) {
-  using BadType = BadConstructors<int>;
-
-  EXPECT_FALSE(absl::is_trivially_default_constructible<BadType>::value);
-  EXPECT_FALSE(absl::is_trivially_move_constructible<BadType>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<BadType>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialMoveCtor) {
-  // Verify that arithmetic types and pointers have trivial move
-  // constructors.
-  EXPECT_TRUE(absl::is_trivially_move_constructible<bool>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<char>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<unsigned char>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<signed char>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<wchar_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<int>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<unsigned int>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<int16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<uint16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<int64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<uint64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<float>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<double>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<long double>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<const std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<const Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<std::string**>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<Trivial**>::value);
-
-  // Reference types
-  EXPECT_TRUE(absl::is_trivially_move_constructible<int&>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<int&&>::value);
-
-  // types with compiler generated move ctors
-  EXPECT_TRUE(absl::is_trivially_move_constructible<Trivial>::value);
-  EXPECT_TRUE(absl::is_trivially_move_constructible<TrivialMoveCtor>::value);
-
-  // Verify that types without them (i.e. nontrivial or deleted) are not.
-  EXPECT_FALSE(
-      absl::is_trivially_move_constructible<NontrivialCopyCtor>::value);
-  EXPECT_FALSE(absl::is_trivially_move_constructible<DeletedCopyCtor>::value);
-  EXPECT_FALSE(
-      absl::is_trivially_move_constructible<NonCopyableOrMovable>::value);
-
-  // type with nontrivial destructor are nontrivial move construbtible
-  EXPECT_FALSE(
-      absl::is_trivially_move_constructible<NontrivialDestructor>::value);
-
-  // types with vtables
-  EXPECT_FALSE(absl::is_trivially_move_constructible<Base>::value);
-
-  // Verify that simple_pair of such types is trivially move constructible
-  EXPECT_TRUE(
-      (absl::is_trivially_move_constructible<simple_pair<int, char*>>::value));
+      (std::is_same<typename absl::remove_cvref<int&&>::type, int>::value));
   EXPECT_TRUE((
-      absl::is_trivially_move_constructible<simple_pair<int, Trivial>>::value));
-  EXPECT_TRUE((absl::is_trivially_move_constructible<
-               simple_pair<int, TrivialMoveCtor>>::value));
-
-  // Verify that types without trivial move constructors are
-  // correctly marked as such.
-  EXPECT_FALSE(absl::is_trivially_move_constructible<std::string>::value);
-  EXPECT_FALSE(absl::is_trivially_move_constructible<std::vector<int>>::value);
-
-  // Verify that simple_pairs of types without trivial move constructors
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::is_trivially_move_constructible<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::is_trivially_move_constructible<
-                simple_pair<std::string, int>>::value));
-
-  // Verify that arrays are not
-  using int10 = int[10];
-  EXPECT_FALSE(absl::is_trivially_move_constructible<int10>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialCopyCtor) {
-  // Verify that arithmetic types and pointers have trivial copy
-  // constructors.
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<bool>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<char>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<unsigned char>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<signed char>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<wchar_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<int>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<unsigned int>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<int16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<uint16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<int64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<uint64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<float>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<double>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<long double>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<const std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<const Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<std::string**>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<Trivial**>::value);
-
-  // Reference types
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<int&>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<int&&>::value);
-
-  // types with compiler generated copy ctors
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<Trivial>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_constructible<TrivialCopyCtor>::value);
-
-  // Verify that types without them (i.e. nontrivial or deleted) are not.
-  EXPECT_FALSE(
-      absl::is_trivially_copy_constructible<NontrivialCopyCtor>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<DeletedCopyCtor>::value);
-  EXPECT_FALSE(
-      absl::is_trivially_copy_constructible<MovableNonCopyable>::value);
-  EXPECT_FALSE(
-      absl::is_trivially_copy_constructible<NonCopyableOrMovable>::value);
-
-  // type with nontrivial destructor are nontrivial copy construbtible
-  EXPECT_FALSE(
-      absl::is_trivially_copy_constructible<NontrivialDestructor>::value);
-
-  // types with vtables
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<Base>::value);
-
-  // Verify that simple_pair of such types is trivially copy constructible
+      std::is_same<typename absl::remove_cvref<const int&>::type, int>::value));
   EXPECT_TRUE(
-      (absl::is_trivially_copy_constructible<simple_pair<int, char*>>::value));
-  EXPECT_TRUE((
-      absl::is_trivially_copy_constructible<simple_pair<int, Trivial>>::value));
-  EXPECT_TRUE((absl::is_trivially_copy_constructible<
-               simple_pair<int, TrivialCopyCtor>>::value));
-
-  // Verify that types without trivial copy constructors are
-  // correctly marked as such.
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<std::string>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<std::vector<int>>::value);
-
-  // Verify that simple_pairs of types without trivial copy constructors
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::is_trivially_copy_constructible<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::is_trivially_copy_constructible<
-                simple_pair<std::string, int>>::value));
-
-  // Verify that arrays are not
-  using int10 = int[10];
-  EXPECT_FALSE(absl::is_trivially_copy_constructible<int10>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialMoveAssign) {
-  // Verify that arithmetic types and pointers have trivial move
-  // assignment operators.
-  EXPECT_TRUE(absl::is_trivially_move_assignable<bool>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<char>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<unsigned char>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<signed char>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<wchar_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<int>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<unsigned int>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<int16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<uint16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<int64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<uint64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<float>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<double>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<long double>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<const std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<const Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<std::string**>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial**>::value);
-
-  // const qualified types are not assignable
-  EXPECT_FALSE(absl::is_trivially_move_assignable<const int>::value);
-
-  // types with compiler generated move assignment
-  EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<TrivialMoveAssign>::value);
-
-  // Verify that types without them (i.e. nontrivial or deleted) are not.
-  EXPECT_FALSE(absl::is_trivially_move_assignable<NontrivialCopyAssign>::value);
-  EXPECT_FALSE(absl::is_trivially_move_assignable<DeletedCopyAssign>::value);
-  EXPECT_FALSE(absl::is_trivially_move_assignable<NonCopyableOrMovable>::value);
-
-  // types with vtables
-  EXPECT_FALSE(absl::is_trivially_move_assignable<Base>::value);
-
-  // Verify that simple_pair is trivially assignable
-  EXPECT_TRUE(
-      (absl::is_trivially_move_assignable<simple_pair<int, char*>>::value));
-  EXPECT_TRUE(
-      (absl::is_trivially_move_assignable<simple_pair<int, Trivial>>::value));
-  EXPECT_TRUE((absl::is_trivially_move_assignable<
-               simple_pair<int, TrivialMoveAssign>>::value));
-
-  // Verify that types not trivially move assignable are
-  // correctly marked as such.
-  EXPECT_FALSE(absl::is_trivially_move_assignable<std::string>::value);
-  EXPECT_FALSE(absl::is_trivially_move_assignable<std::vector<int>>::value);
-
-  // Verify that simple_pairs of types not trivially move assignable
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::is_trivially_move_assignable<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::is_trivially_move_assignable<
-                simple_pair<std::string, int>>::value));
-
-  // Verify that arrays are not trivially move assignable
-  using int10 = int[10];
-  EXPECT_FALSE(absl::is_trivially_move_assignable<int10>::value);
-
-  // Verify that references are handled correctly
-  EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial&&>::value);
-  EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial&>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialCopyAssign) {
-  // Verify that arithmetic types and pointers have trivial copy
-  // assignment operators.
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<bool>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<char>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<unsigned char>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<signed char>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<wchar_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<int>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<unsigned int>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<int16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<uint16_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<int64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<uint64_t>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<float>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<double>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<long double>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<const std::string*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<const Trivial*>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<std::string**>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial**>::value);
-
-  // const qualified types are not assignable
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<const int>::value);
-
-  // types with compiler generated copy assignment
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<TrivialCopyAssign>::value);
-
-  // Verify that types without them (i.e. nontrivial or deleted) are not.
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<NontrivialCopyAssign>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<DeletedCopyAssign>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<MovableNonCopyable>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<NonCopyableOrMovable>::value);
-
-  // types with vtables
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<Base>::value);
-
-  // Verify that simple_pair is trivially assignable
-  EXPECT_TRUE(
-      (absl::is_trivially_copy_assignable<simple_pair<int, char*>>::value));
-  EXPECT_TRUE(
-      (absl::is_trivially_copy_assignable<simple_pair<int, Trivial>>::value));
-  EXPECT_TRUE((absl::is_trivially_copy_assignable<
-               simple_pair<int, TrivialCopyAssign>>::value));
-
-  // Verify that types not trivially copy assignable are
-  // correctly marked as such.
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<std::string>::value);
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<std::vector<int>>::value);
-
-  // Verify that simple_pairs of types not trivially copy assignable
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::is_trivially_copy_assignable<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::is_trivially_copy_assignable<
-                simple_pair<std::string, int>>::value));
-
-  // Verify that arrays are not trivially copy assignable
-  using int10 = int[10];
-  EXPECT_FALSE(absl::is_trivially_copy_assignable<int10>::value);
-
-  // Verify that references are handled correctly
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial&&>::value);
-  EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial&>::value);
-}
-
-TEST(TypeTraitsTest, TestTriviallyCopyable) {
-  // Verify that arithmetic types and pointers are trivially copyable.
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<bool>::value);
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<char>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<unsigned char>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<signed char>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<wchar_t>::value);
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<int>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<unsigned int>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<int16_t>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<uint16_t>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<int64_t>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<uint64_t>::value);
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<float>::value);
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<double>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<long double>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<std::string*>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<Trivial*>::value);
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<
-              const std::string*>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<const Trivial*>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<std::string**>::value);
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<Trivial**>::value);
-
-  // const qualified types are not assignable but are constructible
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<const int>::value);
-
-  // Trivial copy constructor/assignment and destructor.
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<Trivial>::value);
-  // Trivial copy assignment, but non-trivial copy constructor/destructor.
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
-               TrivialCopyAssign>::value);
-  // Trivial copy constructor, but non-trivial assignment.
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
-               TrivialCopyCtor>::value);
-
-  // Types with a non-trivial copy constructor/assignment
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
-               NontrivialCopyCtor>::value);
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
-               NontrivialCopyAssign>::value);
-
-  // Types without copy constructor/assignment, but with move
-  // MSVC disagrees with other compilers about this:
-  // EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<
-  //             MovableNonCopyable>::value);
-
-  // Types without copy/move constructor/assignment
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
-               NonCopyableOrMovable>::value);
-
-  // No copy assign, but has trivial copy constructor.
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<
-              DeletedCopyAssign>::value);
-
-  // types with vtables
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<Base>::value);
-
-  // Verify that simple_pair is trivially copyable if members are
-  EXPECT_TRUE((absl::type_traits_internal::is_trivially_copyable<
-               simple_pair<int, char*>>::value));
-  EXPECT_TRUE((absl::type_traits_internal::is_trivially_copyable<
-               simple_pair<int, Trivial>>::value));
-
-  // Verify that types not trivially copyable are
-  // correctly marked as such.
-  EXPECT_FALSE(
-      absl::type_traits_internal::is_trivially_copyable<std::string>::value);
-  EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
-               std::vector<int>>::value);
-
-  // Verify that simple_pairs of types not trivially copyable
-  // are not marked as trivial.
-  EXPECT_FALSE((absl::type_traits_internal::is_trivially_copyable<
-                simple_pair<int, std::string>>::value));
-  EXPECT_FALSE((absl::type_traits_internal::is_trivially_copyable<
-                simple_pair<std::string, int>>::value));
-  EXPECT_FALSE((absl::type_traits_internal::is_trivially_copyable<
-                simple_pair<int, TrivialCopyAssign>>::value));
-
-  // Verify that arrays of trivially copyable types are trivially copyable
-  using int10 = int[10];
-  EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<int10>::value);
-  using int10x10 = int[10][10];
-  EXPECT_TRUE(
-      absl::type_traits_internal::is_trivially_copyable<int10x10>::value);
-
-  // Verify that references are handled correctly
-  EXPECT_FALSE(
-      absl::type_traits_internal::is_trivially_copyable<Trivial&&>::value);
-  EXPECT_FALSE(
-      absl::type_traits_internal::is_trivially_copyable<Trivial&>::value);
+      (std::is_same<typename absl::remove_cvref<int*>::type, int*>::value));
+  // Does not remove const in this case.
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int*>::type,
+                            const int*>::value));
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int[2]>::type,
+                            int[2]>::value));
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int(&)[2]>::type,
+                            int[2]>::value));
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int(&&)[2]>::type,
+                            int[2]>::value));
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int[2]>::type,
+                            int[2]>::value));
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int(&)[2]>::type,
+                            int[2]>::value));
+  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int(&&)[2]>::type,
+                            int[2]>::value));
 }
 
 #define ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(trait_name, ...)          \
@@ -1208,82 +662,6 @@
   EXPECT_EQ(TypeEnum::D, GetTypeExt(Wrap<TypeD>()));
 }
 
-template <typename T>
-bool TestCopyAssign() {
-  return absl::is_copy_assignable<T>::value ==
-         std::is_copy_assignable<T>::value;
-}
-
-TEST(TypeTraitsTest, IsCopyAssignable) {
-  EXPECT_TRUE(TestCopyAssign<int>());
-  EXPECT_TRUE(TestCopyAssign<int&>());
-  EXPECT_TRUE(TestCopyAssign<int&&>());
-
-  struct S {};
-  EXPECT_TRUE(TestCopyAssign<S>());
-  EXPECT_TRUE(TestCopyAssign<S&>());
-  EXPECT_TRUE(TestCopyAssign<S&&>());
-
-  class C {
-   public:
-    explicit C(C* c) : c_(c) {}
-    ~C() { delete c_; }
-
-   private:
-    C* c_;
-  };
-  EXPECT_TRUE(TestCopyAssign<C>());
-  EXPECT_TRUE(TestCopyAssign<C&>());
-  EXPECT_TRUE(TestCopyAssign<C&&>());
-
-  // Reason for ifndef: add_lvalue_reference<T> in libc++ breaks for these cases
-#ifndef _LIBCPP_VERSION
-  EXPECT_TRUE(TestCopyAssign<int()>());
-  EXPECT_TRUE(TestCopyAssign<int(int) const>());
-  EXPECT_TRUE(TestCopyAssign<int(...) volatile&>());
-  EXPECT_TRUE(TestCopyAssign<int(int, ...) const volatile&&>());
-#endif  // _LIBCPP_VERSION
-}
-
-template <typename T>
-bool TestMoveAssign() {
-  return absl::is_move_assignable<T>::value ==
-         std::is_move_assignable<T>::value;
-}
-
-TEST(TypeTraitsTest, IsMoveAssignable) {
-  EXPECT_TRUE(TestMoveAssign<int>());
-  EXPECT_TRUE(TestMoveAssign<int&>());
-  EXPECT_TRUE(TestMoveAssign<int&&>());
-
-  struct S {};
-  EXPECT_TRUE(TestMoveAssign<S>());
-  EXPECT_TRUE(TestMoveAssign<S&>());
-  EXPECT_TRUE(TestMoveAssign<S&&>());
-
-  class C {
-   public:
-    explicit C(C* c) : c_(c) {}
-    ~C() { delete c_; }
-    void operator=(const C&) = delete;
-    void operator=(C&&) = delete;
-
-   private:
-    C* c_;
-  };
-  EXPECT_TRUE(TestMoveAssign<C>());
-  EXPECT_TRUE(TestMoveAssign<C&>());
-  EXPECT_TRUE(TestMoveAssign<C&&>());
-
-  // Reason for ifndef: add_lvalue_reference<T> in libc++ breaks for these cases
-#ifndef _LIBCPP_VERSION
-  EXPECT_TRUE(TestMoveAssign<int()>());
-  EXPECT_TRUE(TestMoveAssign<int(int) const>());
-  EXPECT_TRUE(TestMoveAssign<int(...) volatile&>());
-  EXPECT_TRUE(TestMoveAssign<int(int, ...) const volatile&&>());
-#endif  // _LIBCPP_VERSION
-}
-
 namespace adl_namespace {
 
 struct DeletedSwap {
@@ -1365,4 +743,99 @@
   EXPECT_TRUE(IsNothrowSwappable<adl_namespace::SpecialNoexceptSwap>::value);
 }
 
+TEST(TriviallyRelocatable, PrimitiveTypes) {
+  static_assert(absl::is_trivially_relocatable<int>::value, "");
+  static_assert(absl::is_trivially_relocatable<char>::value, "");
+  static_assert(absl::is_trivially_relocatable<void*>::value, "");
+}
+
+// User-defined types can be trivially relocatable as long as they don't have a
+// user-provided move constructor or destructor.
+TEST(TriviallyRelocatable, UserDefinedTriviallyRelocatable) {
+  struct S {
+    int x;
+    int y;
+  };
+
+  static_assert(absl::is_trivially_relocatable<S>::value, "");
+}
+
+// A user-provided move constructor disqualifies a type from being trivially
+// relocatable.
+TEST(TriviallyRelocatable, UserProvidedMoveConstructor) {
+  struct S {
+    S(S&&) {}  // NOLINT(modernize-use-equals-default)
+  };
+
+  static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
+// A user-provided copy constructor disqualifies a type from being trivially
+// relocatable.
+TEST(TriviallyRelocatable, UserProvidedCopyConstructor) {
+  struct S {
+    S(const S&) {}  // NOLINT(modernize-use-equals-default)
+  };
+
+  static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
+// A user-provided destructor disqualifies a type from being trivially
+// relocatable.
+TEST(TriviallyRelocatable, UserProvidedDestructor) {
+  struct S {
+    ~S() {}  // NOLINT(modernize-use-equals-default)
+  };
+
+  static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
+// TODO(b/275003464): remove the opt-out for Clang on Windows once
+// __is_trivially_relocatable is used there again.
+#if defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) &&      \
+    ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
+    !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64)))
+// A type marked with the "trivial ABI" attribute is trivially relocatable even
+// if it has user-provided move/copy constructors and a user-provided
+// destructor.
+TEST(TriviallyRelocatable, TrivialAbi) {
+  struct ABSL_ATTRIBUTE_TRIVIAL_ABI S {
+    S(S&&) {}       // NOLINT(modernize-use-equals-default)
+    S(const S&) {}  // NOLINT(modernize-use-equals-default)
+    ~S() {}         // NOLINT(modernize-use-equals-default)
+  };
+
+  static_assert(absl::is_trivially_relocatable<S>::value, "");
+}
+#endif
+
+#ifdef ABSL_HAVE_CONSTANT_EVALUATED
+
+constexpr int64_t NegateIfConstantEvaluated(int64_t i) {
+  if (absl::is_constant_evaluated()) {
+    return -i;
+  } else {
+    return i;
+  }
+}
+
+#endif  // ABSL_HAVE_CONSTANT_EVALUATED
+
+TEST(TriviallyRelocatable, is_constant_evaluated) {
+#ifdef ABSL_HAVE_CONSTANT_EVALUATED
+  constexpr int64_t constant = NegateIfConstantEvaluated(42);
+  EXPECT_EQ(constant, -42);
+
+  int64_t now = absl::ToUnixSeconds(absl::Now());
+  int64_t not_constant = NegateIfConstantEvaluated(now);
+  EXPECT_EQ(not_constant, now);
+
+  static int64_t const_init = NegateIfConstantEvaluated(42);
+  EXPECT_EQ(const_init, -42);
+#else
+  GTEST_SKIP() << "absl::is_constant_evaluated is not defined";
+#endif  // ABSL_HAVE_CONSTANT_EVALUATED
+}
+
 }  // namespace
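
The new tests above pin down when absl::is_trivially_relocatable holds and how ABSL_ATTRIBUTE_TRIVIAL_ABI opts a type back in. As a hedged sketch of how a caller might act on the trait (assuming C++17 for `if constexpr` and `std::launder`; the `Relocate` helper is hypothetical and not part of this change):

#include <cstring>
#include <new>
#include <utility>

#include "absl/meta/type_traits.h"

// Hypothetical helper (not part of this change): moves the object at `src`
// into the raw storage at `dst` and ends the lifetime of `*src`.
template <typename T>
T* Relocate(T* src, void* dst) {
  if constexpr (absl::is_trivially_relocatable<T>::value) {
    std::memcpy(dst, src, sizeof(T));  // bytewise move; no destructor call
    return std::launder(static_cast<T*>(dst));
  } else {
    T* moved = ::new (dst) T(std::move(*src));  // move-construct, then destroy
    src->~T();
    return moved;
  }
}

Containers can use this distinction to grow storage with a single memcpy when the trait holds, which is the motivation behind the trivial-ABI test case above.
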
diff --git a/abseil-cpp/absl/numeric/BUILD.bazel b/abseil-cpp/absl/numeric/BUILD.bazel
index f808f5d..c5aaf72 100644
--- a/abseil-cpp/absl/numeric/BUILD.bazel
+++ b/abseil-cpp/absl/numeric/BUILD.bazel
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -25,6 +24,49 @@
 licenses(["notice"])
 
 cc_library(
+    name = "bits",
+    hdrs = [
+        "bits.h",
+        "internal/bits.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_binary(
+    name = "bits_benchmark",
+    testonly = 1,
+    srcs = ["bits_benchmark.cc"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":bits",
+        "//absl/base:core_headers",
+        "//absl/random",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
+cc_test(
+    name = "bits_test",
+    size = "small",
+    srcs = [
+        "bits_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":bits",
+        "//absl/random",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
     name = "int128",
     srcs = [
         "int128.cc",
@@ -35,7 +77,7 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
-        "//absl/base:bits",
+        ":bits",
         "//absl/base:config",
         "//absl/base:core_headers",
     ],
@@ -53,9 +95,9 @@
     deps = [
         ":int128",
         "//absl/base",
-        "//absl/base:core_headers",
         "//absl/hash:hash_testing",
         "//absl/meta:type_traits",
+        "//absl/strings",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -72,3 +114,15 @@
         "@com_github_google_benchmark//:benchmark_main",
     ],
 )
+
+cc_library(
+    name = "representation",
+    hdrs = [
+        "internal/representation.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+    ],
+)
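
This BUILD change replaces the internal //absl/base:bits dependency with the new public //absl/numeric:bits target. In C++ terms the migration mirrors the int128.cc hunk further down; a small sketch of the before/after spelling (the old form is shown only for contrast):

// Before (internal API, removed in this merge):
//   #include "absl/base/internal/bits.h"
//   int lz = absl::base_internal::CountLeadingZeros64(x);
//
// After (public header added by this change):
#include <cstdint>

#include "absl/numeric/bits.h"

inline int LeadingZeros(uint64_t x) {
  return absl::countl_zero(x);  // same result, now a public C++20-style API
}
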
diff --git a/abseil-cpp/absl/numeric/CMakeLists.txt b/abseil-cpp/absl/numeric/CMakeLists.txt
index 1e12d80..7181b91 100644
--- a/abseil-cpp/absl/numeric/CMakeLists.txt
+++ b/abseil-cpp/absl/numeric/CMakeLists.txt
@@ -16,6 +16,33 @@
 
 absl_cc_library(
   NAME
+    bits
+  HDRS
+    "bits.h"
+    "internal/bits.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::core_headers
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    bits_test
+  SRCS
+    "bits_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::bits
+    absl::core_headers
+    absl::random_random
+    GTest::gmock_main
+)
+
+absl_cc_library(
+  NAME
     int128
   HDRS
     "int128.h"
@@ -26,9 +53,9 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
-    absl::bits
     absl::config
     absl::core_headers
+    absl::bits
   PUBLIC
 )
 
@@ -43,10 +70,10 @@
   DEPS
     absl::int128
     absl::base
-    absl::core_headers
     absl::hash_testing
     absl::type_traits
-    gmock_main
+    absl::strings
+    GTest::gmock_main
 )
 
 # component target
@@ -59,3 +86,15 @@
     absl::int128
   PUBLIC
 )
+
+absl_cc_library(
+  NAME
+    numeric_representation
+  HDRS
+    "internal/representation.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
diff --git a/abseil-cpp/absl/numeric/bits.h b/abseil-cpp/absl/numeric/bits.h
new file mode 100644
index 0000000..5ed36f5
--- /dev/null
+++ b/abseil-cpp/absl/numeric/bits.h
@@ -0,0 +1,177 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: bits.h
+// -----------------------------------------------------------------------------
+//
+// This file contains implementations of C++20's bitwise math functions, as
+// defined by:
+//
+// P0553R4:
+//  http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0553r4.html
+// P0556R3:
+//  http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0556r3.html
+// P1355R2:
+//  http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
+// P1956R1:
+//  http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
+//
+// When using a standard library that implements these functions, we use the
+// standard library's implementation.
+
+#ifndef ABSL_NUMERIC_BITS_H_
+#define ABSL_NUMERIC_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+#include "absl/base/config.h"
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+#include <bit>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/numeric/internal/bits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+
+// rotating
+template <class T>
+ABSL_MUST_USE_RESULT constexpr
+    typename std::enable_if<std::is_unsigned<T>::value, T>::type
+    rotl(T x, int s) noexcept {
+  return numeric_internal::RotateLeft(x, s);
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT constexpr
+    typename std::enable_if<std::is_unsigned<T>::value, T>::type
+    rotr(T x, int s) noexcept {
+  return numeric_internal::RotateRight(x, s);
+}
+
+// Counting functions
+//
+// While these functions are typically constexpr, on some platforms, they may
+// not be marked as constexpr due to constraints of the compiler/available
+// intrinsics.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, int>::type
+    countl_zero(T x) noexcept {
+  return numeric_internal::CountLeadingZeroes(x);
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, int>::type
+    countl_one(T x) noexcept {
+  // Avoid integer promotion to a wider type
+  return countl_zero(static_cast<T>(~x));
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CTZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, int>::type
+    countr_zero(T x) noexcept {
+  return numeric_internal::CountTrailingZeroes(x);
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CTZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, int>::type
+    countr_one(T x) noexcept {
+  // Avoid integer promotion to a wider type
+  return countr_zero(static_cast<T>(~x));
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline
+    typename std::enable_if<std::is_unsigned<T>::value, int>::type
+    popcount(T x) noexcept {
+  return numeric_internal::Popcount(x);
+}
+#else  // defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
+
+using std::countl_one;
+using std::countl_zero;
+using std::countr_one;
+using std::countr_zero;
+using std::popcount;
+using std::rotl;
+using std::rotr;
+
+#endif
+
+#if !(defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L)
+// Returns: true if x is an integral power of two; false otherwise.
+template <class T>
+constexpr inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+has_single_bit(T x) noexcept {
+  return x != 0 && (x & (x - 1)) == 0;
+}
+
+// Returns: If x == 0, 0; otherwise one plus the base-2 logarithm of x, with any
+// fractional part discarded.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, int>::type
+    bit_width(T x) noexcept {
+  return std::numeric_limits<T>::digits - countl_zero(x);
+}
+
+// Returns: If x == 0, 0; otherwise the maximal value y such that
+// has_single_bit(y) is true and y <= x.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, T>::type
+    bit_floor(T x) noexcept {
+  return x == 0 ? 0 : T{1} << (bit_width(x) - 1);
+}
+
+// Returns: N, where N is the smallest power of 2 greater than or equal to x.
+//
+// Preconditions: N is representable as a value of type T.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, T>::type
+    bit_ceil(T x) {
+  // If T is narrower than unsigned, T{1} << bit_width will be promoted.  We
+  // want to force it to wrap around so that bit_ceil of an invalid value is
+  // not a core constant expression.
+  //
+  // BitCeilNonPowerOf2 triggers an overflow in constexpr contexts if we would
+  // undergo promotion to unsigned but not fit the result into T without
+  // truncation.
+  return has_single_bit(x) ? T{1} << (bit_width(x) - 1)
+                           : numeric_internal::BitCeilNonPowerOf2(x);
+}
+#else  // defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L
+
+using std::bit_ceil;
+using std::bit_floor;
+using std::bit_width;
+using std::has_single_bit;
+
+#endif
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_NUMERIC_BITS_H_
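
To make the new header concrete, here is a small usage sketch (not part of the patch) that exercises only the public functions declared above; the expected values match the cases covered in bits_test.cc below:

#include <cstdint>
#include <cstdio>

#include "absl/numeric/bits.h"

int main() {
  const uint32_t x = 1337;  // 0b101'0011'1001

  std::printf("countl_zero(x)    = %d\n", absl::countl_zero(x));   // 21
  std::printf("countr_zero(x)    = %d\n", absl::countr_zero(x));   // 0 (x is odd)
  std::printf("popcount(x)       = %d\n", absl::popcount(x));      // 6
  std::printf("bit_width(x)      = %d\n", absl::bit_width(x));     // 11
  std::printf("bit_floor(x)      = %u\n",
              static_cast<unsigned>(absl::bit_floor(x)));          // 1024
  std::printf("bit_ceil(x)       = %u\n",
              static_cast<unsigned>(absl::bit_ceil(x)));           // 2048
  std::printf("has_single_bit(x) = %d\n", absl::has_single_bit(x) ? 1 : 0);  // 0
  std::printf("rotl(0x12, 4)     = 0x%x\n",
              static_cast<unsigned>(absl::rotl(uint8_t{0x12}, 4)));          // 0x21
  return 0;
}

When the toolchain provides C++20 <bit> (the __cpp_lib_bitops and __cpp_lib_int_pow2 branches above), the same calls resolve to the standard library implementations.
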
diff --git a/abseil-cpp/absl/numeric/bits_benchmark.cc b/abseil-cpp/absl/numeric/bits_benchmark.cc
new file mode 100644
index 0000000..2c89afd
--- /dev/null
+++ b/abseil-cpp/absl/numeric/bits_benchmark.cc
@@ -0,0 +1,73 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "absl/base/optimization.h"
+#include "absl/numeric/bits.h"
+#include "absl/random/random.h"
+
+namespace absl {
+namespace {
+
+template <typename T>
+static void BM_bit_width(benchmark::State& state) {
+  const auto count = static_cast<size_t>(state.range(0));
+
+  absl::BitGen rng;
+  std::vector<T> values;
+  values.reserve(count);
+  for (size_t i = 0; i < count; ++i) {
+    values.push_back(absl::Uniform<T>(rng, 0, std::numeric_limits<T>::max()));
+  }
+
+  while (state.KeepRunningBatch(static_cast<int64_t>(count))) {
+    for (size_t i = 0; i < count; ++i) {
+      benchmark::DoNotOptimize(absl::bit_width(values[i]));
+    }
+  }
+}
+BENCHMARK_TEMPLATE(BM_bit_width, uint8_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bit_width, uint16_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bit_width, uint32_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bit_width, uint64_t)->Range(1, 1 << 20);
+
+template <typename T>
+static void BM_bit_width_nonzero(benchmark::State& state) {
+  const auto count = static_cast<size_t>(state.range(0));
+
+  absl::BitGen rng;
+  std::vector<T> values;
+  values.reserve(count);
+  for (size_t i = 0; i < count; ++i) {
+    values.push_back(absl::Uniform<T>(rng, 1, std::numeric_limits<T>::max()));
+  }
+
+  while (state.KeepRunningBatch(static_cast<int64_t>(count))) {
+    for (size_t i = 0; i < count; ++i) {
+      const T value = values[i];
+      ABSL_ASSUME(value > 0);
+      benchmark::DoNotOptimize(absl::bit_width(value));
+    }
+  }
+}
+BENCHMARK_TEMPLATE(BM_bit_width_nonzero, uint8_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bit_width_nonzero, uint16_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bit_width_nonzero, uint32_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bit_width_nonzero, uint64_t)->Range(1, 1 << 20);
+
+}  // namespace
+}  // namespace absl
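
The *_nonzero benchmark above wraps each call in ABSL_ASSUME(value > 0) so the compiler may discard the zero-input path inside bit_width. A minimal sketch of the same pattern outside the benchmark harness, assuming the caller genuinely guarantees a non-zero argument (violating the assumption is undefined behavior):

#include <cstdint>

#include "absl/base/optimization.h"
#include "absl/numeric/bits.h"

// Index of the highest set bit. The caller promises x != 0, and ABSL_ASSUME
// lets the optimizer elide the countl_zero(0) handling entirely.
inline int HighestSetBit(uint32_t x) {
  ABSL_ASSUME(x != 0);
  return absl::bit_width(x) - 1;
}
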
diff --git a/abseil-cpp/absl/numeric/bits_test.cc b/abseil-cpp/absl/numeric/bits_test.cc
new file mode 100644
index 0000000..14955eb
--- /dev/null
+++ b/abseil-cpp/absl/numeric/bits_test.cc
@@ -0,0 +1,641 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/numeric/bits.h"
+
+#include <limits>
+#include <type_traits>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/random/random.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+template <typename IntT>
+class IntegerTypesTest : public ::testing::Test {};
+
+using OneByteIntegerTypes = ::testing::Types<
+    unsigned char,
+    uint8_t
+    >;
+
+TYPED_TEST_SUITE(IntegerTypesTest, OneByteIntegerTypes);
+
+TYPED_TEST(IntegerTypesTest, HandlesTypes) {
+  using UIntType = TypeParam;
+
+  EXPECT_EQ(rotl(UIntType{0x12}, 0), uint8_t{0x12});
+  EXPECT_EQ(rotr(UIntType{0x12}, -4), uint8_t{0x21});
+  static_assert(rotl(UIntType{0x12}, 0) == uint8_t{0x12}, "");
+
+  static_assert(rotr(UIntType{0x12}, 0) == uint8_t{0x12}, "");
+  EXPECT_EQ(rotr(UIntType{0x12}, 0), uint8_t{0x12});
+
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+  static_assert(countl_zero(UIntType{}) == 8, "");
+  static_assert(countl_zero(static_cast<UIntType>(-1)) == 0, "");
+
+  static_assert(countl_one(UIntType{}) == 0, "");
+  static_assert(countl_one(static_cast<UIntType>(-1)) == 8, "");
+
+  static_assert(countr_zero(UIntType{}) == 8, "");
+  static_assert(countr_zero(static_cast<UIntType>(-1)) == 0, "");
+
+  static_assert(countr_one(UIntType{}) == 0, "");
+  static_assert(countr_one(static_cast<UIntType>(-1)) == 8, "");
+
+  static_assert(popcount(UIntType{}) == 0, "");
+  static_assert(popcount(UIntType{1}) == 1, "");
+  static_assert(popcount(static_cast<UIntType>(-1)) == 8, "");
+
+  static_assert(bit_width(UIntType{}) == 0, "");
+  static_assert(bit_width(UIntType{1}) == 1, "");
+  static_assert(bit_width(UIntType{3}) == 2, "");
+  static_assert(bit_width(static_cast<UIntType>(-1)) == 8, "");
+#endif
+
+  EXPECT_EQ(countl_zero(UIntType{}), 8);
+  EXPECT_EQ(countl_zero(static_cast<UIntType>(-1)), 0);
+
+  EXPECT_EQ(countl_one(UIntType{}), 0);
+  EXPECT_EQ(countl_one(static_cast<UIntType>(-1)), 8);
+
+  EXPECT_EQ(countr_zero(UIntType{}), 8);
+  EXPECT_EQ(countr_zero(static_cast<UIntType>(-1)), 0);
+
+  EXPECT_EQ(countr_one(UIntType{}), 0);
+  EXPECT_EQ(countr_one(static_cast<UIntType>(-1)), 8);
+
+  EXPECT_EQ(popcount(UIntType{}), 0);
+  EXPECT_EQ(popcount(UIntType{1}), 1);
+
+  EXPECT_FALSE(has_single_bit(UIntType{}));
+  EXPECT_FALSE(has_single_bit(static_cast<UIntType>(-1)));
+
+  EXPECT_EQ(bit_width(UIntType{}), 0);
+  EXPECT_EQ(bit_width(UIntType{1}), 1);
+  EXPECT_EQ(bit_width(UIntType{3}), 2);
+  EXPECT_EQ(bit_width(static_cast<UIntType>(-1)), 8);
+}
+
+TEST(Rotate, Left) {
+  static_assert(rotl(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
+  static_assert(rotl(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
+  static_assert(rotl(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
+  static_assert(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
+                    uint64_t{0x12345678ABCDEF01ULL},
+                "");
+
+  EXPECT_EQ(rotl(uint8_t{0x12}, 0), uint8_t{0x12});
+  EXPECT_EQ(rotl(uint16_t{0x1234}, 0), uint16_t{0x1234});
+  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
+  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0),
+            uint64_t{0x12345678ABCDEF01ULL});
+
+  EXPECT_EQ(rotl(uint8_t{0x12}, 8), uint8_t{0x12});
+  EXPECT_EQ(rotl(uint16_t{0x1234}, 16), uint16_t{0x1234});
+  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
+  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 64),
+            uint64_t{0x12345678ABCDEF01ULL});
+
+  EXPECT_EQ(rotl(uint8_t{0x12}, -8), uint8_t{0x12});
+  EXPECT_EQ(rotl(uint16_t{0x1234}, -16), uint16_t{0x1234});
+  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
+  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -64),
+            uint64_t{0x12345678ABCDEF01ULL});
+
+  EXPECT_EQ(rotl(uint8_t{0x12}, 4), uint8_t{0x21});
+  EXPECT_EQ(rotl(uint16_t{0x1234}, 4), uint16_t{0x2341});
+  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 4), uint32_t{0x23456781UL});
+  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 4),
+            uint64_t{0x2345678ABCDEF011ULL});
+
+  EXPECT_EQ(rotl(uint8_t{0x12}, -4), uint8_t{0x21});
+  EXPECT_EQ(rotl(uint16_t{0x1234}, -4), uint16_t{0x4123});
+  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -4), uint32_t{0x81234567UL});
+  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -4),
+            uint64_t{0x112345678ABCDEF0ULL});
+}
+
+TEST(Rotate, Right) {
+  static_assert(rotr(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
+  static_assert(rotr(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
+  static_assert(rotr(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
+  static_assert(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
+                    uint64_t{0x12345678ABCDEF01ULL},
+                "");
+
+  EXPECT_EQ(rotr(uint8_t{0x12}, 0), uint8_t{0x12});
+  EXPECT_EQ(rotr(uint16_t{0x1234}, 0), uint16_t{0x1234});
+  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
+  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0),
+            uint64_t{0x12345678ABCDEF01ULL});
+
+  EXPECT_EQ(rotr(uint8_t{0x12}, 8), uint8_t{0x12});
+  EXPECT_EQ(rotr(uint16_t{0x1234}, 16), uint16_t{0x1234});
+  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
+  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 64),
+            uint64_t{0x12345678ABCDEF01ULL});
+
+  EXPECT_EQ(rotr(uint8_t{0x12}, -8), uint8_t{0x12});
+  EXPECT_EQ(rotr(uint16_t{0x1234}, -16), uint16_t{0x1234});
+  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
+  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -64),
+            uint64_t{0x12345678ABCDEF01ULL});
+
+  EXPECT_EQ(rotr(uint8_t{0x12}, 4), uint8_t{0x21});
+  EXPECT_EQ(rotr(uint16_t{0x1234}, 4), uint16_t{0x4123});
+  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 4), uint32_t{0x81234567UL});
+  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 4),
+            uint64_t{0x112345678ABCDEF0ULL});
+
+  EXPECT_EQ(rotr(uint8_t{0x12}, -4), uint8_t{0x21});
+  EXPECT_EQ(rotr(uint16_t{0x1234}, -4), uint16_t{0x2341});
+  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -4), uint32_t{0x23456781UL});
+  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -4),
+            uint64_t{0x2345678ABCDEF011ULL});
+}
+
+TEST(Rotate, Symmetry) {
+  // rotr(x, s) is equivalent to rotl(x, -s)
+  absl::BitGen rng;
+  constexpr int kTrials = 100;
+
+  for (int i = 0; i < kTrials; ++i) {
+    uint8_t value = absl::Uniform(rng, std::numeric_limits<uint8_t>::min(),
+                                  std::numeric_limits<uint8_t>::max());
+    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint8_t>::digits,
+                              2 * std::numeric_limits<uint8_t>::digits);
+
+    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
+  }
+
+  for (int i = 0; i < kTrials; ++i) {
+    uint16_t value = absl::Uniform(rng, std::numeric_limits<uint16_t>::min(),
+                                   std::numeric_limits<uint16_t>::max());
+    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint16_t>::digits,
+                              2 * std::numeric_limits<uint16_t>::digits);
+
+    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
+  }
+
+  for (int i = 0; i < kTrials; ++i) {
+    uint32_t value = absl::Uniform(rng, std::numeric_limits<uint32_t>::min(),
+                                   std::numeric_limits<uint32_t>::max());
+    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint32_t>::digits,
+                              2 * std::numeric_limits<uint32_t>::digits);
+
+    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
+  }
+
+  for (int i = 0; i < kTrials; ++i) {
+    uint64_t value = absl::Uniform(rng, std::numeric_limits<uint64_t>::min(),
+                                   std::numeric_limits<uint64_t>::max());
+    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint64_t>::digits,
+                              2 * std::numeric_limits<uint64_t>::digits);
+
+    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
+  }
+}
+
+TEST(Counting, LeadingZeroes) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+  static_assert(countl_zero(uint8_t{}) == 8, "");
+  static_assert(countl_zero(static_cast<uint8_t>(-1)) == 0, "");
+  static_assert(countl_zero(uint16_t{}) == 16, "");
+  static_assert(countl_zero(static_cast<uint16_t>(-1)) == 0, "");
+  static_assert(countl_zero(uint32_t{}) == 32, "");
+  static_assert(countl_zero(~uint32_t{}) == 0, "");
+  static_assert(countl_zero(uint64_t{}) == 64, "");
+  static_assert(countl_zero(~uint64_t{}) == 0, "");
+#endif
+
+  EXPECT_EQ(countl_zero(uint8_t{}), 8);
+  EXPECT_EQ(countl_zero(static_cast<uint8_t>(-1)), 0);
+  EXPECT_EQ(countl_zero(uint16_t{}), 16);
+  EXPECT_EQ(countl_zero(static_cast<uint16_t>(-1)), 0);
+  EXPECT_EQ(countl_zero(uint32_t{}), 32);
+  EXPECT_EQ(countl_zero(~uint32_t{}), 0);
+  EXPECT_EQ(countl_zero(uint64_t{}), 64);
+  EXPECT_EQ(countl_zero(~uint64_t{}), 0);
+
+  for (int i = 0; i < 8; i++) {
+    EXPECT_EQ(countl_zero(static_cast<uint8_t>(1u << i)), 7 - i);
+  }
+
+  for (int i = 0; i < 16; i++) {
+    EXPECT_EQ(countl_zero(static_cast<uint16_t>(1u << i)), 15 - i);
+  }
+
+  for (int i = 0; i < 32; i++) {
+    EXPECT_EQ(countl_zero(uint32_t{1} << i), 31 - i);
+  }
+
+  for (int i = 0; i < 64; i++) {
+    EXPECT_EQ(countl_zero(uint64_t{1} << i), 63 - i);
+  }
+}
+
+TEST(Counting, LeadingOnes) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+  static_assert(countl_one(uint8_t{}) == 0, "");
+  static_assert(countl_one(static_cast<uint8_t>(-1)) == 8, "");
+  static_assert(countl_one(uint16_t{}) == 0, "");
+  static_assert(countl_one(static_cast<uint16_t>(-1)) == 16, "");
+  static_assert(countl_one(uint32_t{}) == 0, "");
+  static_assert(countl_one(~uint32_t{}) == 32, "");
+  static_assert(countl_one(uint64_t{}) == 0, "");
+  static_assert(countl_one(~uint64_t{}) == 64, "");
+#endif
+
+  EXPECT_EQ(countl_one(uint8_t{}), 0);
+  EXPECT_EQ(countl_one(static_cast<uint8_t>(-1)), 8);
+  EXPECT_EQ(countl_one(uint16_t{}), 0);
+  EXPECT_EQ(countl_one(static_cast<uint16_t>(-1)), 16);
+  EXPECT_EQ(countl_one(uint32_t{}), 0);
+  EXPECT_EQ(countl_one(~uint32_t{}), 32);
+  EXPECT_EQ(countl_one(uint64_t{}), 0);
+  EXPECT_EQ(countl_one(~uint64_t{}), 64);
+}
+
+TEST(Counting, TrailingZeroes) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
+  static_assert(countr_zero(uint8_t{}) == 8, "");
+  static_assert(countr_zero(static_cast<uint8_t>(-1)) == 0, "");
+  static_assert(countr_zero(uint16_t{}) == 16, "");
+  static_assert(countr_zero(static_cast<uint16_t>(-1)) == 0, "");
+  static_assert(countr_zero(uint32_t{}) == 32, "");
+  static_assert(countr_zero(~uint32_t{}) == 0, "");
+  static_assert(countr_zero(uint64_t{}) == 64, "");
+  static_assert(countr_zero(~uint64_t{}) == 0, "");
+#endif
+
+  EXPECT_EQ(countr_zero(uint8_t{}), 8);
+  EXPECT_EQ(countr_zero(static_cast<uint8_t>(-1)), 0);
+  EXPECT_EQ(countr_zero(uint16_t{}), 16);
+  EXPECT_EQ(countr_zero(static_cast<uint16_t>(-1)), 0);
+  EXPECT_EQ(countr_zero(uint32_t{}), 32);
+  EXPECT_EQ(countr_zero(~uint32_t{}), 0);
+  EXPECT_EQ(countr_zero(uint64_t{}), 64);
+  EXPECT_EQ(countr_zero(~uint64_t{}), 0);
+}
+
+TEST(Counting, TrailingOnes) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
+  static_assert(countr_one(uint8_t{}) == 0, "");
+  static_assert(countr_one(static_cast<uint8_t>(-1)) == 8, "");
+  static_assert(countr_one(uint16_t{}) == 0, "");
+  static_assert(countr_one(static_cast<uint16_t>(-1)) == 16, "");
+  static_assert(countr_one(uint32_t{}) == 0, "");
+  static_assert(countr_one(~uint32_t{}) == 32, "");
+  static_assert(countr_one(uint64_t{}) == 0, "");
+  static_assert(countr_one(~uint64_t{}) == 64, "");
+#endif
+
+  EXPECT_EQ(countr_one(uint8_t{}), 0);
+  EXPECT_EQ(countr_one(static_cast<uint8_t>(-1)), 8);
+  EXPECT_EQ(countr_one(uint16_t{}), 0);
+  EXPECT_EQ(countr_one(static_cast<uint16_t>(-1)), 16);
+  EXPECT_EQ(countr_one(uint32_t{}), 0);
+  EXPECT_EQ(countr_one(~uint32_t{}), 32);
+  EXPECT_EQ(countr_one(uint64_t{}), 0);
+  EXPECT_EQ(countr_one(~uint64_t{}), 64);
+}
+
+TEST(Counting, Popcount) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT
+  static_assert(popcount(uint8_t{}) == 0, "");
+  static_assert(popcount(uint8_t{1}) == 1, "");
+  static_assert(popcount(static_cast<uint8_t>(-1)) == 8, "");
+  static_assert(popcount(uint16_t{}) == 0, "");
+  static_assert(popcount(uint16_t{1}) == 1, "");
+  static_assert(popcount(static_cast<uint16_t>(-1)) == 16, "");
+  static_assert(popcount(uint32_t{}) == 0, "");
+  static_assert(popcount(uint32_t{1}) == 1, "");
+  static_assert(popcount(~uint32_t{}) == 32, "");
+  static_assert(popcount(uint64_t{}) == 0, "");
+  static_assert(popcount(uint64_t{1}) == 1, "");
+  static_assert(popcount(~uint64_t{}) == 64, "");
+#endif  // ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT
+
+  EXPECT_EQ(popcount(uint8_t{}), 0);
+  EXPECT_EQ(popcount(uint8_t{1}), 1);
+  EXPECT_EQ(popcount(static_cast<uint8_t>(-1)), 8);
+  EXPECT_EQ(popcount(uint16_t{}), 0);
+  EXPECT_EQ(popcount(uint16_t{1}), 1);
+  EXPECT_EQ(popcount(static_cast<uint16_t>(-1)), 16);
+  EXPECT_EQ(popcount(uint32_t{}), 0);
+  EXPECT_EQ(popcount(uint32_t{1}), 1);
+  EXPECT_EQ(popcount(~uint32_t{}), 32);
+  EXPECT_EQ(popcount(uint64_t{}), 0);
+  EXPECT_EQ(popcount(uint64_t{1}), 1);
+  EXPECT_EQ(popcount(~uint64_t{}), 64);
+
+  for (int i = 0; i < 8; i++) {
+    EXPECT_EQ(popcount(static_cast<uint8_t>(uint8_t{1} << i)), 1);
+    EXPECT_EQ(popcount(static_cast<uint8_t>(static_cast<uint8_t>(-1) ^
+                                            (uint8_t{1} << i))),
+              7);
+  }
+
+  for (int i = 0; i < 16; i++) {
+    EXPECT_EQ(popcount(static_cast<uint16_t>(uint16_t{1} << i)), 1);
+    EXPECT_EQ(popcount(static_cast<uint16_t>(static_cast<uint16_t>(-1) ^
+                                             (uint16_t{1} << i))),
+              15);
+  }
+
+  for (int i = 0; i < 32; i++) {
+    EXPECT_EQ(popcount(uint32_t{1} << i), 1);
+    EXPECT_EQ(popcount(static_cast<uint32_t>(-1) ^ (uint32_t{1} << i)), 31);
+  }
+
+  for (int i = 0; i < 64; i++) {
+    EXPECT_EQ(popcount(uint64_t{1} << i), 1);
+    EXPECT_EQ(popcount(static_cast<uint64_t>(-1) ^ (uint64_t{1} << i)), 63);
+  }
+}
+
+template <typename T>
+struct PopcountInput {
+  T value = 0;
+  int expected = 0;
+};
+
+template <typename T>
+PopcountInput<T> GeneratePopcountInput(absl::BitGen& gen) {
+  PopcountInput<T> ret;
+  for (int i = 0; i < std::numeric_limits<T>::digits; i++) {
+    bool coin = absl::Bernoulli(gen, 0.2);
+    if (coin) {
+      ret.value |= T{1} << i;
+      ret.expected++;
+    }
+  }
+  return ret;
+}
+
+TEST(Counting, PopcountFuzz) {
+  absl::BitGen rng;
+  constexpr int kTrials = 100;
+
+  for (int i = 0; i < kTrials; ++i) {
+    auto input = GeneratePopcountInput<uint8_t>(rng);
+    EXPECT_EQ(popcount(input.value), input.expected);
+  }
+
+  for (int i = 0; i < kTrials; ++i) {
+    auto input = GeneratePopcountInput<uint16_t>(rng);
+    EXPECT_EQ(popcount(input.value), input.expected);
+  }
+
+  for (int i = 0; i < kTrials; ++i) {
+    auto input = GeneratePopcountInput<uint32_t>(rng);
+    EXPECT_EQ(popcount(input.value), input.expected);
+  }
+
+  for (int i = 0; i < kTrials; ++i) {
+    auto input = GeneratePopcountInput<uint64_t>(rng);
+    EXPECT_EQ(popcount(input.value), input.expected);
+  }
+}
+
+TEST(IntegralPowersOfTwo, SingleBit) {
+  EXPECT_FALSE(has_single_bit(uint8_t{}));
+  EXPECT_FALSE(has_single_bit(static_cast<uint8_t>(-1)));
+  EXPECT_FALSE(has_single_bit(uint16_t{}));
+  EXPECT_FALSE(has_single_bit(static_cast<uint16_t>(-1)));
+  EXPECT_FALSE(has_single_bit(uint32_t{}));
+  EXPECT_FALSE(has_single_bit(~uint32_t{}));
+  EXPECT_FALSE(has_single_bit(uint64_t{}));
+  EXPECT_FALSE(has_single_bit(~uint64_t{}));
+
+  static_assert(!has_single_bit(0u), "");
+  static_assert(has_single_bit(1u), "");
+  static_assert(has_single_bit(2u), "");
+  static_assert(!has_single_bit(3u), "");
+  static_assert(has_single_bit(4u), "");
+  static_assert(!has_single_bit(1337u), "");
+  static_assert(has_single_bit(65536u), "");
+  static_assert(has_single_bit(uint32_t{1} << 30), "");
+  static_assert(has_single_bit(uint64_t{1} << 42), "");
+
+  EXPECT_FALSE(has_single_bit(0u));
+  EXPECT_TRUE(has_single_bit(1u));
+  EXPECT_TRUE(has_single_bit(2u));
+  EXPECT_FALSE(has_single_bit(3u));
+  EXPECT_TRUE(has_single_bit(4u));
+  EXPECT_FALSE(has_single_bit(1337u));
+  EXPECT_TRUE(has_single_bit(65536u));
+  EXPECT_TRUE(has_single_bit(uint32_t{1} << 30));
+  EXPECT_TRUE(has_single_bit(uint64_t{1} << 42));
+
+  EXPECT_TRUE(has_single_bit(
+      static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() / 2 + 1)));
+  EXPECT_TRUE(has_single_bit(
+      static_cast<uint16_t>(std::numeric_limits<uint16_t>::max() / 2 + 1)));
+  EXPECT_TRUE(has_single_bit(
+      static_cast<uint32_t>(std::numeric_limits<uint32_t>::max() / 2 + 1)));
+  EXPECT_TRUE(has_single_bit(
+      static_cast<uint64_t>(std::numeric_limits<uint64_t>::max() / 2 + 1)));
+}
+
+template <typename T, T arg, T = bit_ceil(arg)>
+bool IsBitCeilConstantExpression(int) {
+  return true;
+}
+template <typename T, T arg>
+bool IsBitCeilConstantExpression(char) {
+  return false;
+}
+
+TEST(IntegralPowersOfTwo, Ceiling) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+  static_assert(bit_ceil(0u) == 1, "");
+  static_assert(bit_ceil(1u) == 1, "");
+  static_assert(bit_ceil(2u) == 2, "");
+  static_assert(bit_ceil(3u) == 4, "");
+  static_assert(bit_ceil(4u) == 4, "");
+  static_assert(bit_ceil(1337u) == 2048, "");
+  static_assert(bit_ceil(65536u) == 65536, "");
+  static_assert(bit_ceil(65536u - 1337u) == 65536, "");
+  static_assert(bit_ceil(uint32_t{0x80000000}) == uint32_t{0x80000000}, "");
+  static_assert(bit_ceil(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
+                "");
+  static_assert(
+      bit_ceil(uint64_t{0x8000000000000000}) == uint64_t{0x8000000000000000},
+      "");
+
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x0}>(0)));
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x80}>(0)));
+  EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x81}>(0)));
+  EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0xff}>(0)));
+
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x0}>(0)));
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8000}>(0)));
+  EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8001}>(0)));
+  EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0xffff}>(0)));
+
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x0}>(0)));
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000000}>(0)));
+  EXPECT_FALSE(
+      (IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000001}>(0)));
+  EXPECT_FALSE(
+      (IsBitCeilConstantExpression<uint32_t, uint32_t{0xffffffff}>(0)));
+
+  EXPECT_TRUE((IsBitCeilConstantExpression<uint64_t, uint64_t{0x0}>(0)));
+  EXPECT_TRUE(
+      (IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000000}>(0)));
+  EXPECT_FALSE(
+      (IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000001}>(0)));
+  EXPECT_FALSE(
+      (IsBitCeilConstantExpression<uint64_t, uint64_t{0xffffffffffffffff}>(0)));
+#endif
+
+  EXPECT_EQ(bit_ceil(0u), 1);
+  EXPECT_EQ(bit_ceil(1u), 1);
+  EXPECT_EQ(bit_ceil(2u), 2);
+  EXPECT_EQ(bit_ceil(3u), 4);
+  EXPECT_EQ(bit_ceil(4u), 4);
+  EXPECT_EQ(bit_ceil(1337u), 2048);
+  EXPECT_EQ(bit_ceil(65536u), 65536);
+  EXPECT_EQ(bit_ceil(65536u - 1337u), 65536);
+  EXPECT_EQ(bit_ceil(uint64_t{0x40000000000}), uint64_t{0x40000000000});
+}
+
+TEST(IntegralPowersOfTwo, Floor) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+  static_assert(bit_floor(0u) == 0, "");
+  static_assert(bit_floor(1u) == 1, "");
+  static_assert(bit_floor(2u) == 2, "");
+  static_assert(bit_floor(3u) == 2, "");
+  static_assert(bit_floor(4u) == 4, "");
+  static_assert(bit_floor(1337u) == 1024, "");
+  static_assert(bit_floor(65536u) == 65536, "");
+  static_assert(bit_floor(65536u - 1337u) == 32768, "");
+  static_assert(bit_floor(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
+                "");
+#endif
+
+  EXPECT_EQ(bit_floor(0u), 0);
+  EXPECT_EQ(bit_floor(1u), 1);
+  EXPECT_EQ(bit_floor(2u), 2);
+  EXPECT_EQ(bit_floor(3u), 2);
+  EXPECT_EQ(bit_floor(4u), 4);
+  EXPECT_EQ(bit_floor(1337u), 1024);
+  EXPECT_EQ(bit_floor(65536u), 65536);
+  EXPECT_EQ(bit_floor(65536u - 1337u), 32768);
+  EXPECT_EQ(bit_floor(uint64_t{0x40000000000}), uint64_t{0x40000000000});
+
+  for (int i = 0; i < 8; i++) {
+    uint8_t input = uint8_t{1} << i;
+    EXPECT_EQ(bit_floor(input), input);
+    if (i > 0) {
+      EXPECT_EQ(bit_floor(static_cast<uint8_t>(input + 1)), input);
+    }
+  }
+
+  for (int i = 0; i < 16; i++) {
+    uint16_t input = uint16_t{1} << i;
+    EXPECT_EQ(bit_floor(input), input);
+    if (i > 0) {
+      EXPECT_EQ(bit_floor(static_cast<uint16_t>(input + 1)), input);
+    }
+  }
+
+  for (int i = 0; i < 32; i++) {
+    uint32_t input = uint32_t{1} << i;
+    EXPECT_EQ(bit_floor(input), input);
+    if (i > 0) {
+      EXPECT_EQ(bit_floor(input + 1), input);
+    }
+  }
+
+  for (int i = 0; i < 64; i++) {
+    uint64_t input = uint64_t{1} << i;
+    EXPECT_EQ(bit_floor(input), input);
+    if (i > 0) {
+      EXPECT_EQ(bit_floor(input + 1), input);
+    }
+  }
+}
+
+TEST(IntegralPowersOfTwo, Width) {
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+  static_assert(bit_width(uint8_t{}) == 0, "");
+  static_assert(bit_width(uint8_t{1}) == 1, "");
+  static_assert(bit_width(uint8_t{3}) == 2, "");
+  static_assert(bit_width(static_cast<uint8_t>(-1)) == 8, "");
+  static_assert(bit_width(uint16_t{}) == 0, "");
+  static_assert(bit_width(uint16_t{1}) == 1, "");
+  static_assert(bit_width(uint16_t{3}) == 2, "");
+  static_assert(bit_width(static_cast<uint16_t>(-1)) == 16, "");
+  static_assert(bit_width(uint32_t{}) == 0, "");
+  static_assert(bit_width(uint32_t{1}) == 1, "");
+  static_assert(bit_width(uint32_t{3}) == 2, "");
+  static_assert(bit_width(~uint32_t{}) == 32, "");
+  static_assert(bit_width(uint64_t{}) == 0, "");
+  static_assert(bit_width(uint64_t{1}) == 1, "");
+  static_assert(bit_width(uint64_t{3}) == 2, "");
+  static_assert(bit_width(~uint64_t{}) == 64, "");
+#endif
+
+  EXPECT_EQ(bit_width(uint8_t{}), 0);
+  EXPECT_EQ(bit_width(uint8_t{1}), 1);
+  EXPECT_EQ(bit_width(uint8_t{3}), 2);
+  EXPECT_EQ(bit_width(static_cast<uint8_t>(-1)), 8);
+  EXPECT_EQ(bit_width(uint16_t{}), 0);
+  EXPECT_EQ(bit_width(uint16_t{1}), 1);
+  EXPECT_EQ(bit_width(uint16_t{3}), 2);
+  EXPECT_EQ(bit_width(static_cast<uint16_t>(-1)), 16);
+  EXPECT_EQ(bit_width(uint32_t{}), 0);
+  EXPECT_EQ(bit_width(uint32_t{1}), 1);
+  EXPECT_EQ(bit_width(uint32_t{3}), 2);
+  EXPECT_EQ(bit_width(~uint32_t{}), 32);
+  EXPECT_EQ(bit_width(uint64_t{}), 0);
+  EXPECT_EQ(bit_width(uint64_t{1}), 1);
+  EXPECT_EQ(bit_width(uint64_t{3}), 2);
+  EXPECT_EQ(bit_width(~uint64_t{}), 64);
+
+  for (int i = 0; i < 8; i++) {
+    EXPECT_EQ(bit_width(static_cast<uint8_t>(uint8_t{1} << i)), i + 1);
+  }
+
+  for (int i = 0; i < 16; i++) {
+    EXPECT_EQ(bit_width(static_cast<uint16_t>(uint16_t{1} << i)), i + 1);
+  }
+
+  for (int i = 0; i < 32; i++) {
+    EXPECT_EQ(bit_width(uint32_t{1} << i), i + 1);
+  }
+
+  for (int i = 0; i < 64; i++) {
+    EXPECT_EQ(bit_width(uint64_t{1} << i), i + 1);
+  }
+}
+
+// On GCC and Clang, anticipate that implementations will be constexpr
+#if defined(__GNUC__)
+static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT,
+              "popcount should be constexpr");
+static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CLZ, "clz should be constexpr");
+static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CTZ, "ctz should be constexpr");
+#endif
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
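
bits_test.cc checks that bit_ceil of an unrepresentable value is rejected during constant evaluation by using a defaulted non-type template argument as a SFINAE probe. The same pattern generalizes; a hedged sketch under the same ABSL_INTERNAL_HAS_CONSTEXPR_CLZ guard the test uses (names here are illustrative, not from the patch):

#include <cstdint>

#include "absl/numeric/bits.h"

// If bit_ceil(kArg) is a valid constant expression, the default template
// argument is well-formed and the int overload wins; otherwise substitution
// fails and overload resolution falls back to the char overload.
template <typename T, T kArg, T = absl::bit_ceil(kArg)>
constexpr bool BitCeilIsConstant(int) {
  return true;
}

template <typename T, T /*kArg*/>
constexpr bool BitCeilIsConstant(char) {
  return false;
}

#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(BitCeilIsConstant<uint8_t, uint8_t{0x80}>(0),
              "0x80 is representable, so bit_ceil folds at compile time");
// bit_ceil(uint8_t{0x81}) would need 0x100, which does not fit in uint8_t, so
// BitCeilIsConstant<uint8_t, uint8_t{0x81}>(0) returns false; the test above
// verifies that case at runtime.
#endif
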
diff --git a/abseil-cpp/absl/numeric/int128.cc b/abseil-cpp/absl/numeric/int128.cc
index e21e5e9..daa32b5 100644
--- a/abseil-cpp/absl/numeric/int128.cc
+++ b/abseil-cpp/absl/numeric/int128.cc
@@ -23,8 +23,8 @@
 #include <string>
 #include <type_traits>
 
-#include "absl/base/internal/bits.h"
 #include "absl/base/optimization.h"
+#include "absl/numeric/bits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -42,12 +42,12 @@
 //   Returns: 2
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE int Fls128(uint128 n) {
   if (uint64_t hi = Uint128High64(n)) {
-    ABSL_INTERNAL_ASSUME(hi != 0);
-    return 127 - base_internal::CountLeadingZeros64(hi);
+    ABSL_ASSUME(hi != 0);
+    return 127 - countl_zero(hi);
   }
   const uint64_t low = Uint128Low64(n);
-  ABSL_INTERNAL_ASSUME(low != 0);
-  return 63 - base_internal::CountLeadingZeros64(low);
+  ABSL_ASSUME(low != 0);
+  return 63 - countl_zero(low);
 }
 
 // Long division/modulo for uint128 implemented using the shift-subtract
@@ -111,7 +111,7 @@
   return MakeUint128(0, static_cast<uint64_t>(v));
 }
 
-#if defined(__clang__) && !defined(__SSE3__)
+#if defined(__clang__) && (__clang_major__ < 9) && !defined(__SSE3__)
 // Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
 // Casting from long double to uint64_t is miscompiled and drops bits.
 // It is more work, so only use when we need the workaround.
@@ -131,35 +131,28 @@
   return (static_cast<uint128>(w0) << 100) | (static_cast<uint128>(w1) << 50) |
          static_cast<uint128>(w2);
 }
-#endif  // __clang__ && !__SSE3__
+#endif  // __clang__ && (__clang_major__ < 9) && !__SSE3__
 }  // namespace
 
 uint128::uint128(float v) : uint128(MakeUint128FromFloat(v)) {}
 uint128::uint128(double v) : uint128(MakeUint128FromFloat(v)) {}
 uint128::uint128(long double v) : uint128(MakeUint128FromFloat(v)) {}
 
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
 uint128 operator/(uint128 lhs, uint128 rhs) {
-#if defined(ABSL_HAVE_INTRINSIC_INT128)
-  return static_cast<unsigned __int128>(lhs) /
-         static_cast<unsigned __int128>(rhs);
-#else  // ABSL_HAVE_INTRINSIC_INT128
   uint128 quotient = 0;
   uint128 remainder = 0;
   DivModImpl(lhs, rhs, &quotient, &remainder);
   return quotient;
-#endif  // ABSL_HAVE_INTRINSIC_INT128
 }
+
 uint128 operator%(uint128 lhs, uint128 rhs) {
-#if defined(ABSL_HAVE_INTRINSIC_INT128)
-  return static_cast<unsigned __int128>(lhs) %
-         static_cast<unsigned __int128>(rhs);
-#else  // ABSL_HAVE_INTRINSIC_INT128
   uint128 quotient = 0;
   uint128 remainder = 0;
   DivModImpl(lhs, rhs, &quotient, &remainder);
   return remainder;
-#endif  // ABSL_HAVE_INTRINSIC_INT128
 }
+#endif  // !defined(ABSL_HAVE_INTRINSIC_INT128)
 
 namespace {
 
@@ -209,6 +202,10 @@
 
 }  // namespace
 
+std::string uint128::ToString() const {
+  return Uint128ToFormattedString(*this, std::ios_base::dec);
+}
+
 std::ostream& operator<<(std::ostream& os, uint128 v) {
   std::ios_base::fmtflags flags = os.flags();
   std::string rep = Uint128ToFormattedString(v, flags);
@@ -216,15 +213,16 @@
   // Add the requisite padding.
   std::streamsize width = os.width(0);
   if (static_cast<size_t>(width) > rep.size()) {
+    const size_t count = static_cast<size_t>(width) - rep.size();
     std::ios::fmtflags adjustfield = flags & std::ios::adjustfield;
     if (adjustfield == std::ios::left) {
-      rep.append(width - rep.size(), os.fill());
+      rep.append(count, os.fill());
     } else if (adjustfield == std::ios::internal &&
                (flags & std::ios::showbase) &&
                (flags & std::ios::basefield) == std::ios::hex && v != 0) {
-      rep.insert(2, width - rep.size(), os.fill());
+      rep.insert(size_t{2}, count, os.fill());
     } else {
-      rep.insert(0, width - rep.size(), os.fill());
+      rep.insert(size_t{0}, count, os.fill());
     }
   }
 
@@ -291,6 +289,14 @@
 }
 #endif  // ABSL_HAVE_INTRINSIC_INT128
 
+std::string int128::ToString() const {
+  std::string rep;
+  if (Int128High64(*this) < 0) rep = "-";
+  rep.append(Uint128ToFormattedString(UnsignedAbsoluteValue(*this),
+                                      std::ios_base::dec));
+  return rep;
+}
+
 std::ostream& operator<<(std::ostream& os, int128 v) {
   std::ios_base::fmtflags flags = os.flags();
   std::string rep;
@@ -313,22 +319,23 @@
   // Add the requisite padding.
   std::streamsize width = os.width(0);
   if (static_cast<size_t>(width) > rep.size()) {
+    const size_t count = static_cast<size_t>(width) - rep.size();
     switch (flags & std::ios::adjustfield) {
       case std::ios::left:
-        rep.append(width - rep.size(), os.fill());
+        rep.append(count, os.fill());
         break;
       case std::ios::internal:
         if (print_as_decimal && (rep[0] == '+' || rep[0] == '-')) {
-          rep.insert(1, width - rep.size(), os.fill());
+          rep.insert(size_t{1}, count, os.fill());
         } else if ((flags & std::ios::basefield) == std::ios::hex &&
                    (flags & std::ios::showbase) && v != 0) {
-          rep.insert(2, width - rep.size(), os.fill());
+          rep.insert(size_t{2}, count, os.fill());
         } else {
-          rep.insert(0, width - rep.size(), os.fill());
+          rep.insert(size_t{0}, count, os.fill());
         }
         break;
       default:  // std::ios::right
-        rep.insert(0, width - rep.size(), os.fill());
+        rep.insert(size_t{0}, count, os.fill());
         break;
     }
   }
@@ -339,6 +346,7 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 namespace std {
 constexpr bool numeric_limits<absl::uint128>::is_specialized;
 constexpr bool numeric_limits<absl::uint128>::is_signed;
@@ -388,3 +396,4 @@
 constexpr bool numeric_limits<absl::int128>::traps;
 constexpr bool numeric_limits<absl::int128>::tinyness_before;
 }  // namespace std
+#endif
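
This hunk adds uint128::ToString() and int128::ToString(), which back the AbslStringify friends added in int128.h below. Assuming absl::StrCat in this release already consumes AbslStringify (which the new //absl/strings dependency in the int128 test targets suggests), usage looks roughly like this sketch:

#include <cstdio>
#include <string>

#include "absl/numeric/int128.h"
#include "absl/strings/str_cat.h"

int main() {
  const absl::uint128 big = absl::MakeUint128(1, 0);  // 2^64
  const absl::int128 neg = -absl::MakeInt128(0, 42);  // -42
  // AbslStringify routes both types through the new private ToString(), so no
  // ostream round trip is needed.
  const std::string s = absl::StrCat("big=", big, " neg=", neg);
  std::puts(s.c_str());  // big=18446744073709551616 neg=-42
  return 0;
}
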
diff --git a/abseil-cpp/absl/numeric/int128.h b/abseil-cpp/absl/numeric/int128.h
index 0dd814a..7530a79 100644
--- a/abseil-cpp/absl/numeric/int128.h
+++ b/abseil-cpp/absl/numeric/int128.h
@@ -18,6 +18,10 @@
 // -----------------------------------------------------------------------------
 //
 // This header file defines 128-bit integer types, `uint128` and `int128`.
+//
+// TODO(absl-team): This module is inconsistent as many inline `uint128` methods
+// are defined in this file, while many inline `int128` methods are defined in
+// the `int128_*_intrinsic.inc` files.
 
 #ifndef ABSL_NUMERIC_INT128_H_
 #define ABSL_NUMERIC_INT128_H_
@@ -28,6 +32,7 @@
 #include <cstring>
 #include <iosfwd>
 #include <limits>
+#include <string>
 #include <utility>
 
 #include "absl/base/config.h"
@@ -40,7 +45,7 @@
 // builtin type.  We need to make sure not to define operator wchar_t()
 // alongside operator unsigned short() in these instances.
 #define ABSL_INTERNAL_WCHAR_T __wchar_t
-#if defined(_M_X64)
+#if defined(_M_X64) && !defined(_M_ARM64EC)
 #include <intrin.h>
 #pragma intrinsic(_umul128)
 #endif  // defined(_M_X64)
@@ -115,8 +120,8 @@
 #ifdef ABSL_HAVE_INTRINSIC_INT128
   constexpr uint128(__int128 v);           // NOLINT(runtime/explicit)
   constexpr uint128(unsigned __int128 v);  // NOLINT(runtime/explicit)
-#endif  // ABSL_HAVE_INTRINSIC_INT128
-  constexpr uint128(int128 v);  // NOLINT(runtime/explicit)
+#endif                                     // ABSL_HAVE_INTRINSIC_INT128
+  constexpr uint128(int128 v);             // NOLINT(runtime/explicit)
   explicit uint128(float v);
   explicit uint128(double v);
   explicit uint128(long double v);
@@ -213,9 +218,17 @@
     return H::combine(std::move(h), Uint128High64(v), Uint128Low64(v));
   }
 
+  // Support for absl::StrCat() etc.
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, uint128 v) {
+    sink.Append(v.ToString());
+  }
+
  private:
   constexpr uint128(uint64_t high, uint64_t low);
 
+  std::string ToString() const;
+
   // TODO(strel) Update implementation to use __int128 once all users of
   // uint128 are fixed to not depend on alignof(uint128) == 8. Also add
   // alignas(16) to class definition to keep alignment consistent across
@@ -282,9 +295,9 @@
 #endif  // ABSL_HAVE_INTRINSIC_INT128
   static constexpr bool tinyness_before = false;
 
-  static constexpr absl::uint128 (min)() { return 0; }
+  static constexpr absl::uint128(min)() { return 0; }
   static constexpr absl::uint128 lowest() { return 0; }
-  static constexpr absl::uint128 (max)() { return absl::Uint128Max(); }
+  static constexpr absl::uint128(max)() { return absl::Uint128Max(); }
   static constexpr absl::uint128 epsilon() { return 0; }
   static constexpr absl::uint128 round_error() { return 0; }
   static constexpr absl::uint128 infinity() { return 0; }
@@ -450,9 +463,17 @@
     return H::combine(std::move(h), Int128High64(v), Int128Low64(v));
   }
 
+  // Support for absl::StrCat() etc.
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, int128 v) {
+    sink.Append(v.ToString());
+  }
+
  private:
   constexpr int128(int64_t high, uint64_t low);
 
+  std::string ToString() const;
+
 #if defined(ABSL_HAVE_INTRINSIC_INT128)
   __int128 v_;
 #else  // ABSL_HAVE_INTRINSIC_INT128
@@ -517,9 +538,9 @@
 #endif  // ABSL_HAVE_INTRINSIC_INT128
   static constexpr bool tinyness_before = false;
 
-  static constexpr absl::int128 (min)() { return absl::Int128Min(); }
+  static constexpr absl::int128(min)() { return absl::Int128Min(); }
   static constexpr absl::int128 lowest() { return absl::Int128Min(); }
-  static constexpr absl::int128 (max)() { return absl::Int128Max(); }
+  static constexpr absl::int128(max)() { return absl::Int128Max(); }
   static constexpr absl::int128 epsilon() { return 0; }
   static constexpr absl::int128 round_error() { return 0; }
   static constexpr absl::int128 infinity() { return 0; }
@@ -557,9 +578,7 @@
 }
 
 // NOLINTNEXTLINE(runtime/int)
-inline uint128& uint128::operator=(long long v) {
-  return *this = uint128(v);
-}
+inline uint128& uint128::operator=(long long v) { return *this = uint128(v); }
 
 // NOLINTNEXTLINE(runtime/int)
 inline uint128& uint128::operator=(unsigned long long v) {
@@ -567,25 +586,21 @@
 }
 
 #ifdef ABSL_HAVE_INTRINSIC_INT128
-inline uint128& uint128::operator=(__int128 v) {
-  return *this = uint128(v);
-}
+inline uint128& uint128::operator=(__int128 v) { return *this = uint128(v); }
 
 inline uint128& uint128::operator=(unsigned __int128 v) {
   return *this = uint128(v);
 }
 #endif  // ABSL_HAVE_INTRINSIC_INT128
 
-inline uint128& uint128::operator=(int128 v) {
-  return *this = uint128(v);
-}
+inline uint128& uint128::operator=(int128 v) { return *this = uint128(v); }
 
 // Arithmetic operators.
 
-uint128 operator<<(uint128 lhs, int amount);
-uint128 operator>>(uint128 lhs, int amount);
-uint128 operator+(uint128 lhs, uint128 rhs);
-uint128 operator-(uint128 lhs, uint128 rhs);
+constexpr uint128 operator<<(uint128 lhs, int amount);
+constexpr uint128 operator>>(uint128 lhs, int amount);
+constexpr uint128 operator+(uint128 lhs, uint128 rhs);
+constexpr uint128 operator-(uint128 lhs, uint128 rhs);
 uint128 operator*(uint128 lhs, uint128 rhs);
 uint128 operator/(uint128 lhs, uint128 rhs);
 uint128 operator%(uint128 lhs, uint128 rhs);
@@ -633,8 +648,7 @@
 
 #if defined(ABSL_IS_LITTLE_ENDIAN)
 
-constexpr uint128::uint128(uint64_t high, uint64_t low)
-    : lo_{low}, hi_{high} {}
+constexpr uint128::uint128(uint64_t high, uint64_t low) : lo_{low}, hi_{high} {}
 
 constexpr uint128::uint128(int v)
     : lo_{static_cast<uint64_t>(v)},
@@ -666,8 +680,7 @@
 
 #elif defined(ABSL_IS_BIG_ENDIAN)
 
-constexpr uint128::uint128(uint64_t high, uint64_t low)
-    : hi_{high}, lo_{low} {}
+constexpr uint128::uint128(uint64_t high, uint64_t low) : hi_{high}, lo_{low} {}
 
 constexpr uint128::uint128(int v)
     : hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0},
@@ -782,16 +795,19 @@
 
 // Comparison operators.
 
-inline bool operator==(uint128 lhs, uint128 rhs) {
+constexpr bool operator==(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return static_cast<unsigned __int128>(lhs) ==
+         static_cast<unsigned __int128>(rhs);
+#else
   return (Uint128Low64(lhs) == Uint128Low64(rhs) &&
           Uint128High64(lhs) == Uint128High64(rhs));
+#endif
 }
 
-inline bool operator!=(uint128 lhs, uint128 rhs) {
-  return !(lhs == rhs);
-}
+constexpr bool operator!=(uint128 lhs, uint128 rhs) { return !(lhs == rhs); }
 
-inline bool operator<(uint128 lhs, uint128 rhs) {
+constexpr bool operator<(uint128 lhs, uint128 rhs) {
 #ifdef ABSL_HAVE_INTRINSIC_INT128
   return static_cast<unsigned __int128>(lhs) <
          static_cast<unsigned __int128>(rhs);
@@ -802,118 +818,165 @@
 #endif
 }
 
-inline bool operator>(uint128 lhs, uint128 rhs) { return rhs < lhs; }
+constexpr bool operator>(uint128 lhs, uint128 rhs) { return rhs < lhs; }
 
-inline bool operator<=(uint128 lhs, uint128 rhs) { return !(rhs < lhs); }
+constexpr bool operator<=(uint128 lhs, uint128 rhs) { return !(rhs < lhs); }
 
-inline bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
+constexpr bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
 
 // Unary operators.
 
-inline uint128 operator-(uint128 val) {
-  uint64_t hi = ~Uint128High64(val);
-  uint64_t lo = ~Uint128Low64(val) + 1;
-  if (lo == 0) ++hi;  // carry
-  return MakeUint128(hi, lo);
+constexpr inline uint128 operator+(uint128 val) { return val; }
+
+constexpr inline int128 operator+(int128 val) { return val; }
+
+constexpr uint128 operator-(uint128 val) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return -static_cast<unsigned __int128>(val);
+#else
+  return MakeUint128(
+      ~Uint128High64(val) + static_cast<unsigned long>(Uint128Low64(val) == 0),
+      ~Uint128Low64(val) + 1);
+#endif
 }
 
-inline bool operator!(uint128 val) {
+constexpr inline bool operator!(uint128 val) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return !static_cast<unsigned __int128>(val);
+#else
   return !Uint128High64(val) && !Uint128Low64(val);
+#endif
 }
 
 // Logical operators.
 
-inline uint128 operator~(uint128 val) {
+constexpr inline uint128 operator~(uint128 val) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return ~static_cast<unsigned __int128>(val);
+#else
   return MakeUint128(~Uint128High64(val), ~Uint128Low64(val));
+#endif
 }
 
-inline uint128 operator|(uint128 lhs, uint128 rhs) {
+constexpr inline uint128 operator|(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return static_cast<unsigned __int128>(lhs) |
+         static_cast<unsigned __int128>(rhs);
+#else
   return MakeUint128(Uint128High64(lhs) | Uint128High64(rhs),
-                           Uint128Low64(lhs) | Uint128Low64(rhs));
+                     Uint128Low64(lhs) | Uint128Low64(rhs));
+#endif
 }
 
-inline uint128 operator&(uint128 lhs, uint128 rhs) {
+constexpr inline uint128 operator&(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return static_cast<unsigned __int128>(lhs) &
+         static_cast<unsigned __int128>(rhs);
+#else
   return MakeUint128(Uint128High64(lhs) & Uint128High64(rhs),
-                           Uint128Low64(lhs) & Uint128Low64(rhs));
+                     Uint128Low64(lhs) & Uint128Low64(rhs));
+#endif
 }
 
-inline uint128 operator^(uint128 lhs, uint128 rhs) {
+constexpr inline uint128 operator^(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return static_cast<unsigned __int128>(lhs) ^
+         static_cast<unsigned __int128>(rhs);
+#else
   return MakeUint128(Uint128High64(lhs) ^ Uint128High64(rhs),
-                           Uint128Low64(lhs) ^ Uint128Low64(rhs));
+                     Uint128Low64(lhs) ^ Uint128Low64(rhs));
+#endif
 }
 
 inline uint128& uint128::operator|=(uint128 other) {
-  hi_ |= other.hi_;
-  lo_ |= other.lo_;
+  *this = *this | other;
   return *this;
 }
 
 inline uint128& uint128::operator&=(uint128 other) {
-  hi_ &= other.hi_;
-  lo_ &= other.lo_;
+  *this = *this & other;
   return *this;
 }
 
 inline uint128& uint128::operator^=(uint128 other) {
-  hi_ ^= other.hi_;
-  lo_ ^= other.lo_;
+  *this = *this ^ other;
   return *this;
 }
 
 // Arithmetic operators.
 
-inline uint128 operator<<(uint128 lhs, int amount) {
+constexpr uint128 operator<<(uint128 lhs, int amount) {
 #ifdef ABSL_HAVE_INTRINSIC_INT128
   return static_cast<unsigned __int128>(lhs) << amount;
 #else
   // uint64_t shifts of >= 64 are undefined, so we will need some
   // special-casing.
-  if (amount < 64) {
-    if (amount != 0) {
-      return MakeUint128(
-          (Uint128High64(lhs) << amount) | (Uint128Low64(lhs) >> (64 - amount)),
-          Uint128Low64(lhs) << amount);
-    }
-    return lhs;
-  }
-  return MakeUint128(Uint128Low64(lhs) << (amount - 64), 0);
+  return amount >= 64  ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0)
+         : amount == 0 ? lhs
+                       : MakeUint128((Uint128High64(lhs) << amount) |
+                                         (Uint128Low64(lhs) >> (64 - amount)),
+                                     Uint128Low64(lhs) << amount);
 #endif
 }
 
-inline uint128 operator>>(uint128 lhs, int amount) {
+constexpr uint128 operator>>(uint128 lhs, int amount) {
 #ifdef ABSL_HAVE_INTRINSIC_INT128
   return static_cast<unsigned __int128>(lhs) >> amount;
 #else
   // uint64_t shifts of >= 64 are undefined, so we will need some
   // special-casing.
-  if (amount < 64) {
-    if (amount != 0) {
-      return MakeUint128(Uint128High64(lhs) >> amount,
-                         (Uint128Low64(lhs) >> amount) |
-                             (Uint128High64(lhs) << (64 - amount)));
-    }
-    return lhs;
-  }
-  return MakeUint128(0, Uint128High64(lhs) >> (amount - 64));
+  return amount >= 64  ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64))
+         : amount == 0 ? lhs
+                       : MakeUint128(Uint128High64(lhs) >> amount,
+                                     (Uint128Low64(lhs) >> amount) |
+                                         (Uint128High64(lhs) << (64 - amount)));
 #endif
 }
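The rewritten shift operators above fold the special-casing into a single ternary so they can be constexpr. A minimal standalone sketch (not part of the patch) of why the amount >= 64 branch exists, using only the public uint128 API:

    #include <cassert>
    #include "absl/numeric/int128.h"

    int main() {
      absl::uint128 v = absl::MakeUint128(0, 1);
      // amount < 64: bits move within the low limb and spill into the high limb.
      assert((v << 1) == absl::MakeUint128(0, 2));
      // amount >= 64: the result is the low limb shifted by (amount - 64) into
      // the high limb, because a plain uint64_t shift by >= 64 would be UB.
      assert((v << 64) == absl::MakeUint128(1, 0));
      assert((v << 65) == absl::MakeUint128(2, 0));
      return 0;
    }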
 
-inline uint128 operator+(uint128 lhs, uint128 rhs) {
-  uint128 result = MakeUint128(Uint128High64(lhs) + Uint128High64(rhs),
-                               Uint128Low64(lhs) + Uint128Low64(rhs));
-  if (Uint128Low64(result) < Uint128Low64(lhs)) {  // check for carry
-    return MakeUint128(Uint128High64(result) + 1, Uint128Low64(result));
-  }
-  return result;
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
+namespace int128_internal {
+constexpr uint128 AddResult(uint128 result, uint128 lhs) {
+  // check for carry
+  return (Uint128Low64(result) < Uint128Low64(lhs))
+             ? MakeUint128(Uint128High64(result) + 1, Uint128Low64(result))
+             : result;
+}
+}  // namespace int128_internal
+#endif
+
+constexpr uint128 operator+(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return static_cast<unsigned __int128>(lhs) +
+         static_cast<unsigned __int128>(rhs);
+#else
+  return int128_internal::AddResult(
+      MakeUint128(Uint128High64(lhs) + Uint128High64(rhs),
+                  Uint128Low64(lhs) + Uint128Low64(rhs)),
+      lhs);
+#endif
 }
 
-inline uint128 operator-(uint128 lhs, uint128 rhs) {
-  uint128 result = MakeUint128(Uint128High64(lhs) - Uint128High64(rhs),
-                               Uint128Low64(lhs) - Uint128Low64(rhs));
-  if (Uint128Low64(lhs) < Uint128Low64(rhs)) {  // check for carry
-    return MakeUint128(Uint128High64(result) - 1, Uint128Low64(result));
-  }
-  return result;
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
+namespace int128_internal {
+constexpr uint128 SubstructResult(uint128 result, uint128 lhs, uint128 rhs) {
+  // check for carry
+  return (Uint128Low64(lhs) < Uint128Low64(rhs))
+             ? MakeUint128(Uint128High64(result) - 1, Uint128Low64(result))
+             : result;
+}
+}  // namespace int128_internal
+#endif
+
+constexpr uint128 operator-(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  return static_cast<unsigned __int128>(lhs) -
+         static_cast<unsigned __int128>(rhs);
+#else
+  return int128_internal::SubstructResult(
+      MakeUint128(Uint128High64(lhs) - Uint128High64(rhs),
+                  Uint128Low64(lhs) - Uint128Low64(rhs)),
+      lhs, rhs);
+#endif
 }
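The AddResult/SubstructResult helpers above exist only so the carry/borrow check can be expressed in a single constexpr return statement. A minimal sketch (not part of the patch) of the underlying limb arithmetic, assuming ordinary uint64_t wraparound:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Addition of the low limbs wraps mod 2^64; the wrapped sum is smaller
      // than either operand exactly when a carry out of bit 63 occurred.
      uint64_t lhs_lo = 0xFFFFFFFFFFFFFFFFu;  // 2^64 - 1
      uint64_t rhs_lo = 2;
      uint64_t sum_lo = lhs_lo + rhs_lo;      // wraps to 1
      assert(sum_lo < lhs_lo);                // carry -> bump the high limb
      // Subtraction is symmetric: lhs_lo < rhs_lo means the low limb
      // underflowed, so the high limb must be decremented (borrow).
      uint64_t a = 1, b = 2;
      uint64_t diff_lo = a - b;               // wraps to 2^64 - 1
      assert(a < b && diff_lo == 0xFFFFFFFFFFFFFFFFu);
      return 0;
    }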
 
 inline uint128 operator*(uint128 lhs, uint128 rhs) {
@@ -922,7 +985,7 @@
   // can be used for uint128 storage.
   return static_cast<unsigned __int128>(lhs) *
          static_cast<unsigned __int128>(rhs);
-#elif defined(_MSC_VER) && defined(_M_X64)
+#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
   uint64_t carry;
   uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry);
   return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) +
@@ -943,6 +1006,18 @@
 #endif  // ABSL_HAVE_INTRINSIC128
 }
 
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+inline uint128 operator/(uint128 lhs, uint128 rhs) {
+  return static_cast<unsigned __int128>(lhs) /
+         static_cast<unsigned __int128>(rhs);
+}
+
+inline uint128 operator%(uint128 lhs, uint128 rhs) {
+  return static_cast<unsigned __int128>(lhs) %
+         static_cast<unsigned __int128>(rhs);
+}
+#endif
+
 // Increment/decrement operators.
 
 inline uint128 uint128::operator++(int) {
@@ -972,27 +1047,19 @@
 }
 
 // Assignment from integer types.
-inline int128& int128::operator=(int v) {
-  return *this = int128(v);
-}
+inline int128& int128::operator=(int v) { return *this = int128(v); }
 
-inline int128& int128::operator=(unsigned int v) {
-  return *this = int128(v);
-}
+inline int128& int128::operator=(unsigned int v) { return *this = int128(v); }
 
 inline int128& int128::operator=(long v) {  // NOLINT(runtime/int)
   return *this = int128(v);
 }
 
 // NOLINTNEXTLINE(runtime/int)
-inline int128& int128::operator=(unsigned long v) {
-  return *this = int128(v);
-}
+inline int128& int128::operator=(unsigned long v) { return *this = int128(v); }
 
 // NOLINTNEXTLINE(runtime/int)
-inline int128& int128::operator=(long long v) {
-  return *this = int128(v);
-}
+inline int128& int128::operator=(long long v) { return *this = int128(v); }
 
 // NOLINTNEXTLINE(runtime/int)
 inline int128& int128::operator=(unsigned long long v) {
@@ -1000,17 +1067,17 @@
 }
 
 // Arithmetic operators.
-
-int128 operator+(int128 lhs, int128 rhs);
-int128 operator-(int128 lhs, int128 rhs);
+constexpr int128 operator-(int128 v);
+constexpr int128 operator+(int128 lhs, int128 rhs);
+constexpr int128 operator-(int128 lhs, int128 rhs);
 int128 operator*(int128 lhs, int128 rhs);
 int128 operator/(int128 lhs, int128 rhs);
 int128 operator%(int128 lhs, int128 rhs);
-int128 operator|(int128 lhs, int128 rhs);
-int128 operator&(int128 lhs, int128 rhs);
-int128 operator^(int128 lhs, int128 rhs);
-int128 operator<<(int128 lhs, int amount);
-int128 operator>>(int128 lhs, int amount);
+constexpr int128 operator|(int128 lhs, int128 rhs);
+constexpr int128 operator&(int128 lhs, int128 rhs);
+constexpr int128 operator^(int128 lhs, int128 rhs);
+constexpr int128 operator<<(int128 lhs, int amount);
+constexpr int128 operator>>(int128 lhs, int amount);
 
 inline int128& int128::operator+=(int128 other) {
   *this = *this + other;
@@ -1062,6 +1129,9 @@
   return *this;
 }
 
+// Forward declaration for comparison operators.
+constexpr bool operator!=(int128 lhs, int128 rhs);
+
 namespace int128_internal {
 
 // Casts from unsigned to signed while preserving the underlying binary
diff --git a/abseil-cpp/absl/numeric/int128_have_intrinsic.inc b/abseil-cpp/absl/numeric/int128_have_intrinsic.inc
index d6c76dd..6f1ac64 100644
--- a/abseil-cpp/absl/numeric/int128_have_intrinsic.inc
+++ b/abseil-cpp/absl/numeric/int128_have_intrinsic.inc
@@ -155,16 +155,13 @@
 #if defined(__clang__) && !defined(__ppc64__)
 inline int128::operator float() const { return static_cast<float>(v_); }
 
-inline int128::operator double () const { return static_cast<double>(v_); }
+inline int128::operator double() const { return static_cast<double>(v_); }
 
 inline int128::operator long double() const {
   return static_cast<long double>(v_);
 }
 
 #else  // Clang on PowerPC
-// Forward declaration for conversion operators to floating point types.
-int128 operator-(int128 v);
-bool operator!=(int128 lhs, int128 rhs);
 
 inline int128::operator float() const {
   // We must convert the absolute value and then negate as needed, because
@@ -199,51 +196,45 @@
 
 // Comparison operators.
 
-inline bool operator==(int128 lhs, int128 rhs) {
+constexpr bool operator==(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) == static_cast<__int128>(rhs);
 }
 
-inline bool operator!=(int128 lhs, int128 rhs) {
+constexpr bool operator!=(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) != static_cast<__int128>(rhs);
 }
 
-inline bool operator<(int128 lhs, int128 rhs) {
+constexpr bool operator<(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) < static_cast<__int128>(rhs);
 }
 
-inline bool operator>(int128 lhs, int128 rhs) {
+constexpr bool operator>(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) > static_cast<__int128>(rhs);
 }
 
-inline bool operator<=(int128 lhs, int128 rhs) {
+constexpr bool operator<=(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) <= static_cast<__int128>(rhs);
 }
 
-inline bool operator>=(int128 lhs, int128 rhs) {
+constexpr bool operator>=(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) >= static_cast<__int128>(rhs);
 }
 
 // Unary operators.
 
-inline int128 operator-(int128 v) {
-  return -static_cast<__int128>(v);
-}
+constexpr int128 operator-(int128 v) { return -static_cast<__int128>(v); }
 
-inline bool operator!(int128 v) {
-  return !static_cast<__int128>(v);
-}
+constexpr bool operator!(int128 v) { return !static_cast<__int128>(v); }
 
-inline int128 operator~(int128 val) {
-  return ~static_cast<__int128>(val);
-}
+constexpr int128 operator~(int128 val) { return ~static_cast<__int128>(val); }
 
 // Arithmetic operators.
 
-inline int128 operator+(int128 lhs, int128 rhs) {
+constexpr int128 operator+(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) + static_cast<__int128>(rhs);
 }
 
-inline int128 operator-(int128 lhs, int128 rhs) {
+constexpr int128 operator-(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) - static_cast<__int128>(rhs);
 }
 
@@ -281,22 +272,22 @@
   return *this;
 }
 
-inline int128 operator|(int128 lhs, int128 rhs) {
+constexpr int128 operator|(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) | static_cast<__int128>(rhs);
 }
 
-inline int128 operator&(int128 lhs, int128 rhs) {
+constexpr int128 operator&(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) & static_cast<__int128>(rhs);
 }
 
-inline int128 operator^(int128 lhs, int128 rhs) {
+constexpr int128 operator^(int128 lhs, int128 rhs) {
   return static_cast<__int128>(lhs) ^ static_cast<__int128>(rhs);
 }
 
-inline int128 operator<<(int128 lhs, int amount) {
+constexpr int128 operator<<(int128 lhs, int amount) {
   return static_cast<__int128>(lhs) << amount;
 }
 
-inline int128 operator>>(int128 lhs, int amount) {
+constexpr int128 operator>>(int128 lhs, int amount) {
   return static_cast<__int128>(lhs) >> amount;
 }
diff --git a/abseil-cpp/absl/numeric/int128_no_intrinsic.inc b/abseil-cpp/absl/numeric/int128_no_intrinsic.inc
index c753771..6f5d837 100644
--- a/abseil-cpp/absl/numeric/int128_no_intrinsic.inc
+++ b/abseil-cpp/absl/numeric/int128_no_intrinsic.inc
@@ -23,8 +23,7 @@
 
 #if defined(ABSL_IS_LITTLE_ENDIAN)
 
-constexpr int128::int128(int64_t high, uint64_t low) :
-    lo_(low), hi_(high) {}
+constexpr int128::int128(int64_t high, uint64_t low) : lo_(low), hi_(high) {}
 
 constexpr int128::int128(int v)
     : lo_{static_cast<uint64_t>(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {}
@@ -44,8 +43,7 @@
 
 #elif defined(ABSL_IS_BIG_ENDIAN)
 
-constexpr int128::int128(int64_t high, uint64_t low) :
-    hi_{high}, lo_{low} {}
+constexpr int128::int128(int64_t high, uint64_t low) : hi_{high}, lo_{low} {}
 
 constexpr int128::int128(int v)
     : hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast<uint64_t>(v)} {}
@@ -134,10 +132,6 @@
   return static_cast<unsigned long long>(lo_);           // NOLINT(runtime/int)
 }
 
-// Forward declaration for conversion operators to floating point types.
-int128 operator-(int128 v);
-bool operator!=(int128 lhs, int128 rhs);
-
 inline int128::operator float() const {
   // We must convert the absolute value and then negate as needed, because
   // floating point types are typically sign-magnitude. Otherwise, the
@@ -169,76 +163,80 @@
 
 // Comparison operators.
 
-inline bool operator==(int128 lhs, int128 rhs) {
+constexpr bool operator==(int128 lhs, int128 rhs) {
   return (Int128Low64(lhs) == Int128Low64(rhs) &&
           Int128High64(lhs) == Int128High64(rhs));
 }
 
-inline bool operator!=(int128 lhs, int128 rhs) {
-  return !(lhs == rhs);
-}
+constexpr bool operator!=(int128 lhs, int128 rhs) { return !(lhs == rhs); }
 
-inline bool operator<(int128 lhs, int128 rhs) {
+constexpr bool operator<(int128 lhs, int128 rhs) {
   return (Int128High64(lhs) == Int128High64(rhs))
              ? (Int128Low64(lhs) < Int128Low64(rhs))
              : (Int128High64(lhs) < Int128High64(rhs));
 }
 
-inline bool operator>(int128 lhs, int128 rhs) {
+constexpr bool operator>(int128 lhs, int128 rhs) {
   return (Int128High64(lhs) == Int128High64(rhs))
              ? (Int128Low64(lhs) > Int128Low64(rhs))
              : (Int128High64(lhs) > Int128High64(rhs));
 }
 
-inline bool operator<=(int128 lhs, int128 rhs) {
-  return !(lhs > rhs);
-}
+constexpr bool operator<=(int128 lhs, int128 rhs) { return !(lhs > rhs); }
 
-inline bool operator>=(int128 lhs, int128 rhs) {
-  return !(lhs < rhs);
-}
+constexpr bool operator>=(int128 lhs, int128 rhs) { return !(lhs < rhs); }
 
 // Unary operators.
 
-inline int128 operator-(int128 v) {
-  int64_t hi = ~Int128High64(v);
-  uint64_t lo = ~Int128Low64(v) + 1;
-  if (lo == 0) ++hi;  // carry
-  return MakeInt128(hi, lo);
+constexpr int128 operator-(int128 v) {
+  return MakeInt128(~Int128High64(v) + (Int128Low64(v) == 0),
+                    ~Int128Low64(v) + 1);
 }
 
-inline bool operator!(int128 v) {
+constexpr bool operator!(int128 v) {
   return !Int128Low64(v) && !Int128High64(v);
 }
 
-inline int128 operator~(int128 val) {
+constexpr int128 operator~(int128 val) {
   return MakeInt128(~Int128High64(val), ~Int128Low64(val));
 }
 
 // Arithmetic operators.
 
-inline int128 operator+(int128 lhs, int128 rhs) {
-  int128 result = MakeInt128(Int128High64(lhs) + Int128High64(rhs),
-                             Int128Low64(lhs) + Int128Low64(rhs));
-  if (Int128Low64(result) < Int128Low64(lhs)) {  // check for carry
-    return MakeInt128(Int128High64(result) + 1, Int128Low64(result));
-  }
-  return result;
+namespace int128_internal {
+constexpr int128 SignedAddResult(int128 result, int128 lhs) {
+  // check for carry
+  return (Int128Low64(result) < Int128Low64(lhs))
+             ? MakeInt128(Int128High64(result) + 1, Int128Low64(result))
+             : result;
+}
+}  // namespace int128_internal
+constexpr int128 operator+(int128 lhs, int128 rhs) {
+  return int128_internal::SignedAddResult(
+      MakeInt128(Int128High64(lhs) + Int128High64(rhs),
+                 Int128Low64(lhs) + Int128Low64(rhs)),
+      lhs);
 }
 
-inline int128 operator-(int128 lhs, int128 rhs) {
-  int128 result = MakeInt128(Int128High64(lhs) - Int128High64(rhs),
-                             Int128Low64(lhs) - Int128Low64(rhs));
-  if (Int128Low64(lhs) < Int128Low64(rhs)) {  // check for carry
-    return MakeInt128(Int128High64(result) - 1, Int128Low64(result));
-  }
-  return result;
+namespace int128_internal {
+constexpr int128 SignedSubstructResult(int128 result, int128 lhs, int128 rhs) {
+  // check for carry
+  return (Int128Low64(lhs) < Int128Low64(rhs))
+             ? MakeInt128(Int128High64(result) - 1, Int128Low64(result))
+             : result;
+}
+}  // namespace int128_internal
+constexpr int128 operator-(int128 lhs, int128 rhs) {
+  return int128_internal::SignedSubstructResult(
+      MakeInt128(Int128High64(lhs) - Int128High64(rhs),
+                 Int128Low64(lhs) - Int128Low64(rhs)),
+      lhs, rhs);
 }
 
 inline int128 operator*(int128 lhs, int128 rhs) {
-  uint128 result = uint128(lhs) * rhs;
-  return MakeInt128(int128_internal::BitCastToSigned(Uint128High64(result)),
-                    Uint128Low64(result));
+  return MakeInt128(
+      int128_internal::BitCastToSigned(Uint128High64(uint128(lhs) * rhs)),
+      Uint128Low64(uint128(lhs) * rhs));
 }
 
 inline int128 int128::operator++(int) {
@@ -263,46 +261,68 @@
   return *this;
 }
 
-inline int128 operator|(int128 lhs, int128 rhs) {
+constexpr int128 operator|(int128 lhs, int128 rhs) {
   return MakeInt128(Int128High64(lhs) | Int128High64(rhs),
                     Int128Low64(lhs) | Int128Low64(rhs));
 }
 
-inline int128 operator&(int128 lhs, int128 rhs) {
+constexpr int128 operator&(int128 lhs, int128 rhs) {
   return MakeInt128(Int128High64(lhs) & Int128High64(rhs),
                     Int128Low64(lhs) & Int128Low64(rhs));
 }
 
-inline int128 operator^(int128 lhs, int128 rhs) {
+constexpr int128 operator^(int128 lhs, int128 rhs) {
   return MakeInt128(Int128High64(lhs) ^ Int128High64(rhs),
                     Int128Low64(lhs) ^ Int128Low64(rhs));
 }
 
-inline int128 operator<<(int128 lhs, int amount) {
-  // uint64_t shifts of >= 64 are undefined, so we need some special-casing.
-  if (amount < 64) {
-    if (amount != 0) {
-      return MakeInt128(
-          (Int128High64(lhs) << amount) |
-              static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
-          Int128Low64(lhs) << amount);
-    }
+constexpr int128 operator<<(int128 lhs, int amount) {
+  // int64_t shifts of >= 63 are undefined, so we need some special-casing.
+  assert(amount >= 0 && amount < 127);
+  if (amount <= 0) {
     return lhs;
+  } else if (amount < 63) {
+    return MakeInt128(
+        (Int128High64(lhs) << amount) |
+            static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
+        Int128Low64(lhs) << amount);
+  } else if (amount == 63) {
+    return MakeInt128(((Int128High64(lhs) << 32) << 31) |
+                          static_cast<int64_t>(Int128Low64(lhs) >> 1),
+                      (Int128Low64(lhs) << 32) << 31);
+  } else if (amount == 127) {
+    return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << 63), 0);
+  } else if (amount > 127) {
+    return MakeInt128(0, 0);
+  } else {
+    // amount >= 64 && amount < 127
+    return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)),
+                      0);
   }
-  return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)), 0);
 }
 
-inline int128 operator>>(int128 lhs, int amount) {
-  // uint64_t shifts of >= 64 are undefined, so we need some special-casing.
-  if (amount < 64) {
-    if (amount != 0) {
-      return MakeInt128(
-          Int128High64(lhs) >> amount,
-          (Int128Low64(lhs) >> amount) |
-              (static_cast<uint64_t>(Int128High64(lhs)) << (64 - amount)));
-    }
+constexpr int128 operator>>(int128 lhs, int amount) {
+  // int64_t shifts of >= 63 are undefined, so we need some special-casing.
+  assert(amount >= 0 && amount < 127);
+  if (amount <= 0) {
     return lhs;
+  } else if (amount < 63) {
+    return MakeInt128(
+        Int128High64(lhs) >> amount,
+        Int128Low64(lhs) >> amount | static_cast<uint64_t>(Int128High64(lhs))
+                                         << (64 - amount));
+  } else if (amount == 63) {
+    return MakeInt128((Int128High64(lhs) >> 32) >> 31,
+                      static_cast<uint64_t>(Int128High64(lhs) << 1) |
+                          (Int128Low64(lhs) >> 32) >> 31);
+
+  } else if (amount >= 127) {
+    return MakeInt128((Int128High64(lhs) >> 32) >> 31,
+                      static_cast<uint64_t>((Int128High64(lhs) >> 32) >> 31));
+  } else {
+    // amount >= 64 && amount < 127
+    return MakeInt128(
+        (Int128High64(lhs) >> 32) >> 31,
+        static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)));
   }
-  return MakeInt128(0,
-                    static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)));
 }
diff --git a/abseil-cpp/absl/numeric/int128_stream_test.cc b/abseil-cpp/absl/numeric/int128_stream_test.cc
index 479ad66..bd93784 100644
--- a/abseil-cpp/absl/numeric/int128_stream_test.cc
+++ b/abseil-cpp/absl/numeric/int128_stream_test.cc
@@ -18,6 +18,7 @@
 #include <string>
 
 #include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
 
 namespace {
 
@@ -76,16 +77,6 @@
   return msg.str();
 }
 
-void CheckUint128Case(const Uint128TestCase& test_case) {
-  std::ostringstream os;
-  os.flags(test_case.flags);
-  os.width(test_case.width);
-  os.fill(kFill);
-  os << test_case.value;
-  SCOPED_TRACE(StreamFormatToString(test_case.flags, test_case.width));
-  EXPECT_EQ(test_case.expected, os.str());
-}
-
 constexpr std::ios::fmtflags kDec = std::ios::dec;
 constexpr std::ios::fmtflags kOct = std::ios::oct;
 constexpr std::ios::fmtflags kHex = std::ios::hex;
@@ -96,6 +87,19 @@
 constexpr std::ios::fmtflags kBase = std::ios::showbase;
 constexpr std::ios::fmtflags kPos = std::ios::showpos;
 
+void CheckUint128Case(const Uint128TestCase& test_case) {
+  if (test_case.flags == kDec && test_case.width == 0) {
+    EXPECT_EQ(absl::StrCat(test_case.value), test_case.expected);
+  }
+  std::ostringstream os;
+  os.flags(test_case.flags);
+  os.width(test_case.width);
+  os.fill(kFill);
+  os << test_case.value;
+  SCOPED_TRACE(StreamFormatToString(test_case.flags, test_case.width));
+  EXPECT_EQ(os.str(), test_case.expected);
+}
+
 TEST(Uint128, OStreamValueTest) {
   CheckUint128Case({1, kDec, /*width = */ 0, "1"});
   CheckUint128Case({1, kOct, /*width = */ 0, "1"});
@@ -155,13 +159,16 @@
 };
 
 void CheckInt128Case(const Int128TestCase& test_case) {
+  if (test_case.flags == kDec && test_case.width == 0) {
+    EXPECT_EQ(absl::StrCat(test_case.value), test_case.expected);
+  }
   std::ostringstream os;
   os.flags(test_case.flags);
   os.width(test_case.width);
   os.fill(kFill);
   os << test_case.value;
   SCOPED_TRACE(StreamFormatToString(test_case.flags, test_case.width));
-  EXPECT_EQ(test_case.expected, os.str());
+  EXPECT_EQ(os.str(), test_case.expected);
 }
 
 TEST(Int128, OStreamValueTest) {
@@ -194,35 +201,33 @@
       {absl::MakeInt128(1, 0), kHex, /*width = */ 0, "10000000000000000"});
   CheckInt128Case({absl::MakeInt128(std::numeric_limits<int64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()),
-                   std::ios::dec, /*width = */ 0,
+                   kDec, /*width = */ 0,
                    "170141183460469231731687303715884105727"});
   CheckInt128Case({absl::MakeInt128(std::numeric_limits<int64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()),
-                   std::ios::oct, /*width = */ 0,
+                   kOct, /*width = */ 0,
                    "1777777777777777777777777777777777777777777"});
   CheckInt128Case({absl::MakeInt128(std::numeric_limits<int64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()),
-                   std::ios::hex, /*width = */ 0,
-                   "7fffffffffffffffffffffffffffffff"});
+                   kHex, /*width = */ 0, "7fffffffffffffffffffffffffffffff"});
   CheckInt128Case({absl::MakeInt128(std::numeric_limits<int64_t>::min(), 0),
-                   std::ios::dec, /*width = */ 0,
+                   kDec, /*width = */ 0,
                    "-170141183460469231731687303715884105728"});
   CheckInt128Case({absl::MakeInt128(std::numeric_limits<int64_t>::min(), 0),
-                   std::ios::oct, /*width = */ 0,
+                   kOct, /*width = */ 0,
                    "2000000000000000000000000000000000000000000"});
   CheckInt128Case({absl::MakeInt128(std::numeric_limits<int64_t>::min(), 0),
-                   std::ios::hex, /*width = */ 0,
-                   "80000000000000000000000000000000"});
-  CheckInt128Case({-1, std::ios::dec, /*width = */ 0, "-1"});
-  CheckInt128Case({-1, std::ios::oct, /*width = */ 0,
+                   kHex, /*width = */ 0, "80000000000000000000000000000000"});
+  CheckInt128Case({-1, kDec, /*width = */ 0, "-1"});
+  CheckInt128Case({-1, kOct, /*width = */ 0,
                    "3777777777777777777777777777777777777777777"});
   CheckInt128Case(
-      {-1, std::ios::hex, /*width = */ 0, "ffffffffffffffffffffffffffffffff"});
-  CheckInt128Case({-12345, std::ios::dec, /*width = */ 0, "-12345"});
-  CheckInt128Case({-12345, std::ios::oct, /*width = */ 0,
+      {-1, kHex, /*width = */ 0, "ffffffffffffffffffffffffffffffff"});
+  CheckInt128Case({-12345, kDec, /*width = */ 0, "-12345"});
+  CheckInt128Case({-12345, kOct, /*width = */ 0,
                    "3777777777777777777777777777777777777747707"});
-  CheckInt128Case({-12345, std::ios::hex, /*width = */ 0,
-                   "ffffffffffffffffffffffffffffcfc7"});
+  CheckInt128Case(
+      {-12345, kHex, /*width = */ 0, "ffffffffffffffffffffffffffffcfc7"});
 }
 
 std::vector<Int128TestCase> GetInt128FormatCases();
diff --git a/abseil-cpp/absl/numeric/int128_test.cc b/abseil-cpp/absl/numeric/int128_test.cc
index bc86c71..01e3eb5 100644
--- a/abseil-cpp/absl/numeric/int128_test.cc
+++ b/abseil-cpp/absl/numeric/int128_test.cc
@@ -32,6 +32,8 @@
 #pragma warning(disable:4146)
 #endif
 
+#define MAKE_INT128(HI, LO) absl::MakeInt128(static_cast<int64_t>(HI), LO)
+
 namespace {
 
 template <typename T>
@@ -226,6 +228,11 @@
   EXPECT_EQ(test >>= 1, one);
   EXPECT_EQ(test <<= 1, two);
 
+  EXPECT_EQ(big, +big);
+  EXPECT_EQ(two, +two);
+  EXPECT_EQ(absl::Uint128Max(), +absl::Uint128Max());
+  EXPECT_EQ(zero, +zero);
+
   EXPECT_EQ(big, -(-big));
   EXPECT_EQ(two, -((-one) - 1));
   EXPECT_EQ(absl::Uint128Max(), -one);
@@ -234,6 +241,24 @@
   EXPECT_EQ(absl::Uint128Max(), absl::kuint128max);
 }
 
+TEST(Int128, RightShiftOfNegativeNumbers) {
+  absl::int128 minus_six = -6;
+  absl::int128 minus_three = -3;
+  absl::int128 minus_two = -2;
+  absl::int128 minus_one = -1;
+  if ((-6 >> 1) == -3) {
+    // Right shift is arithmetic (sign propagates)
+    EXPECT_EQ(minus_six >> 1, minus_three);
+    EXPECT_EQ(minus_six >> 2, minus_two);
+    EXPECT_EQ(minus_six >> 65, minus_one);
+  } else {
+    // Right shift is logical (zeros shifted in at MSB)
+    EXPECT_EQ(minus_six >> 1, absl::int128(absl::uint128(minus_six) >> 1));
+    EXPECT_EQ(minus_six >> 2, absl::int128(absl::uint128(minus_six) >> 2));
+    EXPECT_EQ(minus_six >> 65, absl::int128(absl::uint128(minus_six) >> 65));
+  }
+}
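The new test above accepts either shift behavior because right-shifting a negative value is implementation-defined before C++20. A tiny standalone sketch (not part of the patch) of the two behaviors it distinguishes, using 8-bit values for readability:

    #include <cassert>
    #include <cstdint>

    int main() {
      // -6 is 0b11111010 in two's complement.
      uint8_t bits = 0xFA;
      // Logical shift (unsigned operand): zeros enter at the top, 0x7D == 125.
      assert(static_cast<uint8_t>(bits >> 1) == 0x7D);
      // Arithmetic shift (the common signed behavior): the sign bit propagates.
      if ((-6 >> 1) == -3) {
        assert((-6 >> 2) == -2);
        assert((-6 >> 4) == -1);  // sticks at -1, never reaches 0
      }
      return 0;
    }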
+
 TEST(Uint128, ConversionTests) {
   EXPECT_TRUE(absl::MakeUint128(1, 0));
 
@@ -260,8 +285,9 @@
   EXPECT_EQ(from_precise_double, from_precise_ints);
   EXPECT_DOUBLE_EQ(static_cast<double>(from_precise_ints), precise_double);
 
-  double approx_double = 0xffffeeeeddddcccc * std::pow(2.0, 64.0) +
-                         0xbbbbaaaa99998888;
+  double approx_double =
+      static_cast<double>(0xffffeeeeddddcccc) * std::pow(2.0, 64.0) +
+      static_cast<double>(0xbbbbaaaa99998888);
   absl::uint128 from_approx_double(approx_double);
   EXPECT_DOUBLE_EQ(static_cast<double>(from_approx_double), approx_double);
 
@@ -769,6 +795,19 @@
   }
 }
 
+TEST(Int128, UnaryPlusTest) {
+  int64_t values64[] = {0, 1, 12345, 0x4000000000000000,
+                        std::numeric_limits<int64_t>::max()};
+  for (int64_t value : values64) {
+    SCOPED_TRACE(::testing::Message() << "value = " << value);
+
+    EXPECT_EQ(absl::int128(value), +absl::int128(value));
+    EXPECT_EQ(absl::int128(-value), +absl::int128(-value));
+    EXPECT_EQ(absl::MakeInt128(value, 0), +absl::MakeInt128(value, 0));
+    EXPECT_EQ(absl::MakeInt128(-value, 0), +absl::MakeInt128(-value, 0));
+  }
+}
+
 TEST(Int128, UnaryNegationTest) {
   int64_t values64[] = {0, 1, 12345, 0x4000000000000000,
                         std::numeric_limits<int64_t>::max()};
@@ -1209,6 +1248,27 @@
                 absl::MakeInt128(uint64_t{1} << j, 0) >>= (j - i));
     }
   }
+
+  // Manually calculated cases with shift counts for positive (val1) and
+  // negative (val2) values.
+  absl::int128 val1 = MAKE_INT128(0x123456789abcdef0, 0x123456789abcdef0);
+  absl::int128 val2 = MAKE_INT128(0xfedcba0987654321, 0xfedcba0987654321);
+
+  EXPECT_EQ(val1 << 63, MAKE_INT128(0x91a2b3c4d5e6f78, 0x0));
+  EXPECT_EQ(val1 << 64, MAKE_INT128(0x123456789abcdef0, 0x0));
+  EXPECT_EQ(val2 << 63, MAKE_INT128(0xff6e5d04c3b2a190, 0x8000000000000000));
+  EXPECT_EQ(val2 << 64, MAKE_INT128(0xfedcba0987654321, 0x0));
+
+  EXPECT_EQ(val1 << 126, MAKE_INT128(0x0, 0x0));
+  EXPECT_EQ(val2 << 126, MAKE_INT128(0x4000000000000000, 0x0));
+
+  EXPECT_EQ(val1 >> 63, MAKE_INT128(0x0, 0x2468acf13579bde0));
+  EXPECT_EQ(val1 >> 64, MAKE_INT128(0x0, 0x123456789abcdef0));
+  EXPECT_EQ(val2 >> 63, MAKE_INT128(0xffffffffffffffff, 0xfdb974130eca8643));
+  EXPECT_EQ(val2 >> 64, MAKE_INT128(0xffffffffffffffff, 0xfedcba0987654321));
+
+  EXPECT_EQ(val1 >> 126, MAKE_INT128(0x0, 0x0));
+  EXPECT_EQ(val2 >> 126, MAKE_INT128(0xffffffffffffffff, 0xffffffffffffffff));
 }
 
 TEST(Int128, NumericLimitsTest) {
diff --git a/abseil-cpp/absl/numeric/internal/bits.h b/abseil-cpp/absl/numeric/internal/bits.h
new file mode 100644
index 0000000..bfef06b
--- /dev/null
+++ b/abseil-cpp/absl/numeric/internal/bits.h
@@ -0,0 +1,358 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
+#define ABSL_NUMERIC_INTERNAL_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+// Clang on Windows has __builtin_clzll; otherwise we need to use the
+// Windows intrinsic functions.
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
+#else
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CLZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CTZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace numeric_internal {
+
+constexpr bool IsPowerOf2(unsigned int x) noexcept {
+  return x != 0 && (x & (x - 1)) == 0;
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
+    T x, int s) noexcept {
+  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+                "T must have a power-of-2 size");
+
+  return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
+         static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
+    T x, int s) noexcept {
+  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+                "T must have a power-of-2 size");
+
+  return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
+         static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount32(uint32_t x) noexcept {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
+  static_assert(sizeof(unsigned int) == sizeof(x),
+                "__builtin_popcount does not take 32-bit arg");
+  return __builtin_popcount(x);
+#else
+  x -= ((x >> 1) & 0x55555555);
+  x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+  return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount64(uint64_t x) noexcept {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+  static_assert(sizeof(unsigned long long) == sizeof(x),  // NOLINT(runtime/int)
+                "__builtin_popcount does not take 64-bit arg");
+  return __builtin_popcountll(x);
+#else
+  x -= (x >> 1) & 0x5555555555555555ULL;
+  x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
+  return static_cast<int>(
+      (((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
+#endif
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount(T x) noexcept {
+  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+                "T must have a power-of-2 size");
+  static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
+  return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes32(uint32_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
+  // Use __builtin_clz, which uses the following instructions:
+  //  x86: bsr, lzcnt
+  //  ARM64: clz
+  //  PPC: cntlzd
+
+  static_assert(sizeof(unsigned int) == sizeof(x),
+                "__builtin_clz does not take 32-bit arg");
+  // Handle 0 as a special case because __builtin_clz(0) is undefined.
+  return x == 0 ? 32 : __builtin_clz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (_BitScanReverse(&result, x)) {
+    return 31 - result;
+  }
+  return 32;
+#else
+  int zeroes = 28;
+  if (x >> 16) {
+    zeroes -= 16;
+    x >>= 16;
+  }
+  if (x >> 8) {
+    zeroes -= 8;
+    x >>= 8;
+  }
+  if (x >> 4) {
+    zeroes -= 4;
+    x >>= 4;
+  }
+  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+}
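The portable fallback above narrows x to its top nibble with three shift tests and then indexes a 16-entry table packed into a string literal, the 16th entry being the string's implicit terminating '\0'. A standalone restatement (not part of the patch) with a couple of spot checks:

    #include <cassert>
    #include <cstdint>

    int FallbackClz32(uint32_t x) {
      int zeroes = 28;
      if (x >> 16) { zeroes -= 16; x >>= 16; }
      if (x >> 8)  { zeroes -= 8;  x >>= 8; }
      if (x >> 4)  { zeroes -= 4;  x >>= 4; }
      // x is now a nibble; the table gives its leading zeros within 4 bits.
      return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
    }

    int main() {
      assert(FallbackClz32(0x00012345u) == 15);  // top set bit is bit 16
      assert(FallbackClz32(1u) == 31);
      assert(FallbackClz32(0x80000000u) == 0);
      return 0;
    }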
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes16(uint16_t x) {
+#if ABSL_HAVE_BUILTIN(__builtin_clzs)
+  static_assert(sizeof(unsigned short) == sizeof(x),  // NOLINT(runtime/int)
+                "__builtin_clzs does not take 16-bit arg");
+  return x == 0 ? 16 : __builtin_clzs(x);
+#else
+  return CountLeadingZeroes32(x) - 16;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes64(uint64_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+  // Use __builtin_clzll, which uses the following instructions:
+  //  x86: bsr, lzcnt
+  //  ARM64: clz
+  //  PPC: cntlzd
+  static_assert(sizeof(unsigned long long) == sizeof(x),  // NOLINT(runtime/int)
+                "__builtin_clzll does not take 64-bit arg");
+
+  // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+  return x == 0 ? 64 : __builtin_clzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+    (defined(_M_X64) || defined(_M_ARM64))
+  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (_BitScanReverse64(&result, x)) {
+    return 63 - result;
+  }
+  return 64;
+#elif defined(_MSC_VER) && !defined(__clang__)
+  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse.
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if ((x >> 32) &&
+      _BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
+    return 31 - result;
+  }
+  if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
+    return 63 - result;
+  }
+  return 64;
+#else
+  int zeroes = 60;
+  if (x >> 32) {
+    zeroes -= 32;
+    x >>= 32;
+  }
+  if (x >> 16) {
+    zeroes -= 16;
+    x >>= 16;
+  }
+  if (x >> 8) {
+    zeroes -= 8;
+    x >>= 8;
+  }
+  if (x >> 4) {
+    zeroes -= 4;
+    x >>= 4;
+  }
+  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+}
+
+template <typename T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes(T x) {
+  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+                "T must have a power-of-2 size");
+  static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+  return sizeof(T) <= sizeof(uint16_t)
+             ? CountLeadingZeroes16(static_cast<uint16_t>(x)) -
+                   (std::numeric_limits<uint16_t>::digits -
+                    std::numeric_limits<T>::digits)
+             : (sizeof(T) <= sizeof(uint32_t)
+                    ? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
+                          (std::numeric_limits<uint32_t>::digits -
+                           std::numeric_limits<T>::digits)
+                    : CountLeadingZeroes64(x));
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero32(uint32_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
+  static_assert(sizeof(unsigned int) == sizeof(x),
+                "__builtin_ctz does not take 32-bit arg");
+  return __builtin_ctz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  _BitScanForward(&result, x);
+  return result;
+#else
+  int c = 31;
+  x &= ~x + 1;
+  if (x & 0x0000FFFF) c -= 16;
+  if (x & 0x00FF00FF) c -= 8;
+  if (x & 0x0F0F0F0F) c -= 4;
+  if (x & 0x33333333) c -= 2;
+  if (x & 0x55555555) c -= 1;
+  return c;
+#endif
+}
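The non-intrinsic branch above first isolates the lowest set bit with x &= ~x + 1 (equivalently x & -x) and then locates it with five stripe masks. A small worked sketch (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 40u;                      // 0b101000
      uint32_t lowest = x & (~x + 1u);       // isolate the lowest set bit
      assert(lowest == 8u);                  // bit 3
      int c = 31;
      if (lowest & 0x0000FFFFu) c -= 16;     // bit 3 is below bit 16
      if (lowest & 0x00FF00FFu) c -= 8;      // ... in the low byte of its half
      if (lowest & 0x0F0F0F0Fu) c -= 4;      // ... in the low nibble of its byte
      if (lowest & 0x33333333u) c -= 2;      // not taken: high pair of the nibble
      if (lowest & 0x55555555u) c -= 1;      // not taken: odd bit position
      assert(c == 3);                        // 40 has three trailing zeros
      return 0;
    }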
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero64(uint64_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+  static_assert(sizeof(unsigned long long) == sizeof(x),  // NOLINT(runtime/int)
+                "__builtin_ctzll does not take 64-bit arg");
+  return __builtin_ctzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+    (defined(_M_X64) || defined(_M_ARM64))
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  _BitScanForward64(&result, x);
+  return result;
+#elif defined(_MSC_VER) && !defined(__clang__)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (static_cast<uint32_t>(x) == 0) {
+    _BitScanForward(&result, static_cast<unsigned long>(x >> 32));
+    return result + 32;
+  }
+  _BitScanForward(&result, static_cast<unsigned long>(x));
+  return result;
+#else
+  int c = 63;
+  x &= ~x + 1;
+  if (x & 0x00000000FFFFFFFF) c -= 32;
+  if (x & 0x0000FFFF0000FFFF) c -= 16;
+  if (x & 0x00FF00FF00FF00FF) c -= 8;
+  if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
+  if (x & 0x3333333333333333) c -= 2;
+  if (x & 0x5555555555555555) c -= 1;
+  return c;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero16(uint16_t x) {
+#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
+  static_assert(sizeof(unsigned short) == sizeof(x),  // NOLINT(runtime/int)
+                "__builtin_ctzs does not take 16-bit arg");
+  return __builtin_ctzs(x);
+#else
+  return CountTrailingZeroesNonzero32(x);
+#endif
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroes(T x) noexcept {
+  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+                "T must have a power-of-2 size");
+  static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+  return x == 0 ? std::numeric_limits<T>::digits
+                : (sizeof(T) <= sizeof(uint16_t)
+                       ? CountTrailingZeroesNonzero16(static_cast<uint16_t>(x))
+                       : (sizeof(T) <= sizeof(uint32_t)
+                              ? CountTrailingZeroesNonzero32(
+                                    static_cast<uint32_t>(x))
+                              : CountTrailingZeroesNonzero64(x)));
+}
+
+// If T is narrower than unsigned, T{1} << bit_width will be promoted.  We
+// want to force it to wrap around so that bit_ceil of an invalid value is
+// not a core constant expression.
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, T>::type
+    BitCeilPromotionHelper(T x, T promotion) {
+  return (T{1} << (x + promotion)) >> promotion;
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+    typename std::enable_if<std::is_unsigned<T>::value, T>::type
+    BitCeilNonPowerOf2(T x) {
+  // If T is narrower than unsigned, it undergoes promotion to unsigned when we
+  // shift.  We calculate the number of bits added by the wider type.
+  return BitCeilPromotionHelper(
+      static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
+      T{sizeof(T) >= sizeof(unsigned) ? 0
+                                      : std::numeric_limits<unsigned>::digits -
+                                            std::numeric_limits<T>::digits});
+}
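A non-template restatement of the promotion trick, specialized to uint8_t for illustration only (not the patch's helper) and assuming the usual 32-bit unsigned int:

    #include <cstdint>
    #include <limits>

    constexpr uint8_t BitCeilPromotion8(uint8_t x, unsigned promotion) {
      return static_cast<uint8_t>((uint8_t{1} << (x + promotion)) >> promotion);
    }

    int main() {
      constexpr unsigned kPromotion = std::numeric_limits<unsigned>::digits -
                                      std::numeric_limits<uint8_t>::digits;
      // A value needing 5 bits gets the ceiling 1 << 5 == 32; the extra
      // pre-shift by kPromotion bits is undone by the shift back.
      static_assert(BitCeilPromotion8(5, kPromotion) == 32, "");
      // For an invalid request (x == 8) the total shift would be 32 bits on a
      // 32-bit unsigned int, which is not a constant expression -- exactly the
      // property the comment above describes.
      return 0;
    }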
+
+}  // namespace numeric_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_NUMERIC_INTERNAL_BITS_H_
diff --git a/abseil-cpp/absl/numeric/internal/representation.h b/abseil-cpp/absl/numeric/internal/representation.h
new file mode 100644
index 0000000..82d332f
--- /dev/null
+++ b/abseil-cpp/absl/numeric/internal/representation.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+#define ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+
+#include <limits>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace numeric_internal {
+
+// Returns true iff long double is represented as a pair of doubles added
+// together.
+inline constexpr bool IsDoubleDouble() {
+  // A double-double value always has exactly twice the precision of a double
+  // value--one double carries the high digits and one double carries the low
+  // digits. This property is not shared with any other common floating-point
+  // representation, so this test won't trigger false positives. For reference,
+  // this table gives the number of bits of precision of each common
+  // floating-point representation:
+  //
+  //                type     precision
+  //         IEEE single          24 b
+  //         IEEE double          53
+  //     x86 long double          64
+  //       double-double         106
+  //      IEEE quadruple         113
+  //
+  // Note in particular that a quadruple-precision float has greater precision
+  // than a double-double float despite taking up the same amount of memory; the
+  // quad has more of its bits allocated to the mantissa than the double-double
+  // has.
+  return std::numeric_limits<long double>::digits ==
+         2 * std::numeric_limits<double>::digits;
+}
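A quick standalone probe (not part of the patch) showing what the precision check sees on a given toolchain; the values in the comment mirror the table above:

    #include <cstdio>
    #include <limits>

    int main() {
      // e.g. 53 / 64  on x86-64 (80-bit long double)      -> not double-double
      //      53 / 106 on PowerPC with IBM long double     -> double-double
      //      53 / 113 with IEEE quadruple long double     -> not double-double
      std::printf("double digits:      %d\n", std::numeric_limits<double>::digits);
      std::printf("long double digits: %d\n",
                  std::numeric_limits<long double>::digits);
      std::printf("double-double:      %s\n",
                  std::numeric_limits<long double>::digits ==
                          2 * std::numeric_limits<double>::digits
                      ? "yes"
                      : "no");
      return 0;
    }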
+
+}  // namespace numeric_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
diff --git a/abseil-cpp/absl/profiling/BUILD.bazel b/abseil-cpp/absl/profiling/BUILD.bazel
new file mode 100644
index 0000000..3392c96
--- /dev/null
+++ b/abseil-cpp/absl/profiling/BUILD.bazel
@@ -0,0 +1,129 @@
+# Copyright 2021 The Abseil Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = ["//visibility:private"])
+
+licenses(["notice"])
+
+cc_library(
+    name = "sample_recorder",
+    hdrs = ["internal/sample_recorder.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/synchronization",
+        "//absl/time",
+    ],
+)
+
+cc_test(
+    name = "sample_recorder_test",
+    srcs = ["internal/sample_recorder_test.cc"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
+    deps = [
+        ":sample_recorder",
+        "//absl/base:core_headers",
+        "//absl/synchronization",
+        "//absl/synchronization:thread_pool",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "exponential_biased",
+    srcs = ["internal/exponential_biased.cc"],
+    hdrs = ["internal/exponential_biased.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_test(
+    name = "exponential_biased_test",
+    size = "small",
+    srcs = ["internal/exponential_biased_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":exponential_biased",
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "periodic_sampler",
+    srcs = ["internal/periodic_sampler.cc"],
+    hdrs = ["internal/periodic_sampler.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":exponential_biased",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_test(
+    name = "periodic_sampler_test",
+    size = "small",
+    srcs = ["internal/periodic_sampler_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":periodic_sampler",
+        "//absl/base:core_headers",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "periodic_sampler_benchmark",
+    testonly = 1,
+    srcs = ["internal/periodic_sampler_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":periodic_sampler",
+        "//absl/base:core_headers",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
diff --git a/abseil-cpp/absl/profiling/CMakeLists.txt b/abseil-cpp/absl/profiling/CMakeLists.txt
new file mode 100644
index 0000000..9b3a710
--- /dev/null
+++ b/abseil-cpp/absl/profiling/CMakeLists.txt
@@ -0,0 +1,93 @@
+# Copyright 2021 The Abseil Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+absl_cc_library(
+  NAME
+    sample_recorder
+  HDRS
+    "internal/sample_recorder.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::synchronization
+)
+
+absl_cc_test(
+  NAME
+    sample_recorder_test
+  SRCS
+    "internal/sample_recorder_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::sample_recorder
+    absl::time
+    GTest::gmock_main
+)
+
+absl_cc_library(
+  NAME
+    exponential_biased
+  SRCS
+    "internal/exponential_biased.cc"
+  HDRS
+    "internal/exponential_biased.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+)
+
+absl_cc_test(
+  NAME
+    exponential_biased_test
+  SRCS
+    "internal/exponential_biased_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::exponential_biased
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_library(
+  NAME
+    periodic_sampler
+  SRCS
+    "internal/periodic_sampler.cc"
+  HDRS
+    "internal/periodic_sampler.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::core_headers
+    absl::exponential_biased
+)
+
+absl_cc_test(
+  NAME
+    periodic_sampler_test
+  SRCS
+    "internal/periodic_sampler_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::core_headers
+    absl::periodic_sampler
+    GTest::gmock_main
+)
+
diff --git a/abseil-cpp/absl/base/internal/exponential_biased.cc b/abseil-cpp/absl/profiling/internal/exponential_biased.cc
similarity index 95%
rename from abseil-cpp/absl/base/internal/exponential_biased.cc
rename to abseil-cpp/absl/profiling/internal/exponential_biased.cc
index 1b30c06..81d9a75 100644
--- a/abseil-cpp/absl/base/internal/exponential_biased.cc
+++ b/abseil-cpp/absl/profiling/internal/exponential_biased.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/base/internal/exponential_biased.h"
+#include "absl/profiling/internal/exponential_biased.h"
 
 #include <stdint.h>
 
@@ -26,7 +26,7 @@
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
 
 // The algorithm generates a random number between 0 and 1 and applies the
 // inverse cumulative distribution function for an exponential. Specifically:
@@ -64,7 +64,7 @@
     // Assume huge values are bias neutral, retain bias for next call.
     return std::numeric_limits<int64_t>::max() / 2;
   }
-  double value = std::round(interval);
+  double value = std::rint(interval);
   bias_ = interval - value;
   return value;
 }
@@ -88,6 +88,6 @@
   initialized_ = true;
 }
 
-}  // namespace base_internal
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/base/internal/exponential_biased.h b/abseil-cpp/absl/profiling/internal/exponential_biased.h
similarity index 94%
rename from abseil-cpp/absl/base/internal/exponential_biased.h
rename to abseil-cpp/absl/profiling/internal/exponential_biased.h
index 94f79a3..d31f778 100644
--- a/abseil-cpp/absl/base/internal/exponential_biased.h
+++ b/abseil-cpp/absl/profiling/internal/exponential_biased.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
-#define ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
+#ifndef ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
+#define ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
 
 #include <stdint.h>
 
@@ -22,7 +22,7 @@
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
 
 // ExponentialBiased provides a small and fast random number generator for a
 // rounded exponential distribution. This generator manages very little state,
@@ -66,7 +66,7 @@
 // Adjusting with rounding bias is relatively trivial:
 //
 //    double value = bias_ + exponential_distribution(mean)();
-//    double rounded_value = std::round(value);
+//    double rounded_value = std::rint(value);
 //    bias_ = value - rounded_value;
 //    return rounded_value;
 //
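The patch swaps std::round for std::rint both in this comment and in exponential_biased.cc; presumably the point is that rint honors the current rounding mode (round-half-to-even by default) rather than always rounding halfway cases away from zero, so the carried bias_ stays centered. A tiny standalone comparison (not part of the patch):

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>

    int main() {
      // With the default rounding mode:
      //   std::round: 0.5 -> 1, 1.5 -> 2, 2.5 -> 3   (half away from zero)
      //   std::rint:  0.5 -> 0, 1.5 -> 2, 2.5 -> 2   (half to even)
      for (double v : {0.5, 1.5, 2.5}) {
        std::printf("round(%.1f) = %.0f   rint(%.1f) = %.0f\n",
                    v, std::round(v), v, std::rint(v));
      }
      return 0;
    }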
@@ -123,8 +123,8 @@
   return (prng_mult * rnd + prng_add) & prng_mod_mask;
 }
 
-}  // namespace base_internal
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
+#endif  // ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
diff --git a/abseil-cpp/absl/base/internal/exponential_biased_test.cc b/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc
similarity index 88%
rename from abseil-cpp/absl/base/internal/exponential_biased_test.cc
rename to abseil-cpp/absl/profiling/internal/exponential_biased_test.cc
index 90a482d..ebfbcad 100644
--- a/abseil-cpp/absl/base/internal/exponential_biased_test.cc
+++ b/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/base/internal/exponential_biased.h"
+#include "absl/profiling/internal/exponential_biased.h"
 
 #include <stddef.h>
 
@@ -28,7 +28,8 @@
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
+namespace {
 
 MATCHER_P2(IsBetween, a, b,
            absl::StrCat(std::string(negation ? "isn't" : "is"), " between ", a,
@@ -93,13 +94,14 @@
 }
 
 double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
-  int n = random_sample.size();
+  size_t n = random_sample.size();
   double ad_sum = 0;
-  for (int i = 0; i < n; i++) {
+  for (size_t i = 0; i < n; i++) {
     ad_sum += (2 * i + 1) *
               std::log(random_sample[i] * (1 - random_sample[n - 1 - i]));
   }
-  double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum;
+  const auto n_as_double = static_cast<double>(n);
+  double ad_statistic = -n_as_double - 1 / n_as_double * ad_sum;
   return ad_statistic;
 }
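For reference, the statistic the loop computes is A^2 = -n - (1/n) * sum_{i=0}^{n-1} (2i+1) * ln(u_i * (1 - u_{n-1-i})) over the sorted sample u_0 <= ... <= u_{n-1}. A small standalone sketch (not part of the patch):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    double AndersonDarling(const std::vector<double>& u) {
      const double n = static_cast<double>(u.size());
      double sum = 0;
      for (size_t i = 0; i < u.size(); ++i) {
        sum += (2 * i + 1) * std::log(u[i] * (1 - u[u.size() - 1 - i]));
      }
      return -n - sum / n;
    }

    int main() {
      // An evenly spread sample yields a small statistic (good uniform fit).
      std::printf("A^2 = %f\n", AndersonDarling({0.125, 0.375, 0.625, 0.875}));
      return 0;
    }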
 
@@ -110,14 +112,15 @@
 // Marsaglia and Marsaglia for details.
 double AndersonDarlingTest(const std::vector<double>& random_sample) {
   double ad_statistic = AndersonDarlingStatistic(random_sample);
-  double p = AndersonDarlingPValue(random_sample.size(), ad_statistic);
+  double p = AndersonDarlingPValue(static_cast<int>(random_sample.size()),
+                                   ad_statistic);
   return p;
 }
 
 TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
   ExponentialBiased eb;
   for (int runs = 0; runs < 10; ++runs) {
-    for (int flips = eb.GetSkipCount(1); flips > 0; --flips) {
+    for (int64_t flips = eb.GetSkipCount(1); flips > 0; --flips) {
       printf("head...");
     }
     printf("tail\n");
@@ -131,7 +134,7 @@
 
 TEST(ExponentialBiasedTest, SampleDemoWithStride) {
   ExponentialBiased eb;
-  int stride = eb.GetStride(10);
+  int64_t stride = eb.GetStride(10);
   int samples = 0;
   for (int i = 0; i < 10000000; ++i) {
     if (--stride == 0) {
@@ -146,7 +149,7 @@
 // Testing that NextRandom generates uniform random numbers. Applies the
 // Anderson-Darling test for uniformity
 TEST(ExponentialBiasedTest, TestNextRandom) {
-  for (auto n : std::vector<int>({
+  for (auto n : std::vector<size_t>({
            10,  // Check short-range correlation
            100, 1000,
            10000  // Make sure there's no systemic error
@@ -160,7 +163,7 @@
     }
     std::vector<uint64_t> int_random_sample(n);
     // Collect samples
-    for (int i = 0; i < n; i++) {
+    for (size_t i = 0; i < n; i++) {
       int_random_sample[i] = x;
       x = ExponentialBiased::NextRandom(x);
     }
@@ -168,7 +171,7 @@
     std::sort(int_random_sample.begin(), int_random_sample.end());
     std::vector<double> random_sample(n);
     // Convert them to uniform randoms (in the range [0,1])
-    for (int i = 0; i < n; i++) {
+    for (size_t i = 0; i < n; i++) {
       random_sample[i] =
           static_cast<double>(int_random_sample[i]) / max_prng_value;
     }
@@ -185,7 +188,7 @@
   ABSL_CONST_INIT static ExponentialBiased eb_static;
   EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0));
 
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   thread_local ExponentialBiased eb_thread;
   EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0));
 #endif
@@ -194,6 +197,7 @@
   EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0));
 }
 
-}  // namespace base_internal
+}  // namespace
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
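// Illustrative sketch (not part of the patch): the 1-in-N sampling pattern the
// two demo tests above exercise.  GetStride(N) returns how many events to let
// pass before the next sample, so the hot path only decrements a counter.
// ShouldSample and `period` are names invented for this sketch; real call
// sites keep this state per thread.
#include <cstdint>

#include "absl/profiling/internal/exponential_biased.h"

namespace {

absl::profiling_internal::ExponentialBiased g_biased;
int64_t g_remaining = 0;  // events left until the next sample is taken

// Returns true roughly once every `period` calls, with geometric spacing.
bool ShouldSample(int64_t period) {
  if (g_remaining <= 0) {
    g_remaining = g_biased.GetStride(period);
  }
  return --g_remaining == 0;
}

}  // namespace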
diff --git a/abseil-cpp/absl/base/internal/periodic_sampler.cc b/abseil-cpp/absl/profiling/internal/periodic_sampler.cc
similarity index 88%
rename from abseil-cpp/absl/base/internal/periodic_sampler.cc
rename to abseil-cpp/absl/profiling/internal/periodic_sampler.cc
index 520dabb..a738a82 100644
--- a/abseil-cpp/absl/base/internal/periodic_sampler.cc
+++ b/abseil-cpp/absl/profiling/internal/periodic_sampler.cc
@@ -12,15 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/base/internal/periodic_sampler.h"
+#include "absl/profiling/internal/periodic_sampler.h"
 
 #include <atomic>
 
-#include "absl/base/internal/exponential_biased.h"
+#include "absl/profiling/internal/exponential_biased.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
 
 int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept {
   return rng_.GetStride(period);
@@ -48,6 +48,6 @@
   return true;
 }
 
-}  // namespace base_internal
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/base/internal/periodic_sampler.h b/abseil-cpp/absl/profiling/internal/periodic_sampler.h
similarity index 95%
rename from abseil-cpp/absl/base/internal/periodic_sampler.h
rename to abseil-cpp/absl/profiling/internal/periodic_sampler.h
index f8a8679..54f0af4 100644
--- a/abseil-cpp/absl/base/internal/periodic_sampler.h
+++ b/abseil-cpp/absl/profiling/internal/periodic_sampler.h
@@ -12,19 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
-#define ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
+#ifndef ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
+#define ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
 
 #include <stdint.h>
 
 #include <atomic>
 
-#include "absl/base/internal/exponential_biased.h"
 #include "absl/base/optimization.h"
+#include "absl/profiling/internal/exponential_biased.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
 
 // PeriodicSamplerBase provides the basic period sampler implementation.
 //
@@ -149,7 +149,7 @@
   //   ICC   x64 (OK) : https://gcc.godbolt.org/z/ptTNfD
   //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/76j4-5
   uint64_t stride_ = 0;
-  ExponentialBiased rng_;
+  absl::profiling_internal::ExponentialBiased rng_;
 };
 
 inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept {
@@ -204,8 +204,8 @@
 template <typename Tag, int default_period>
 std::atomic<int> PeriodicSampler<Tag, default_period>::period_(default_period);
 
-}  // namespace base_internal
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
+#endif  // ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
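// Illustrative sketch (not part of the patch): a minimal use of the renamed
// PeriodicSampler, assuming the Sample() entry point declared in the header
// above.  AllocTag, the period of 1024 and MaybeRecordAllocation are invented
// for this example.
#include <cstddef>

#include "absl/profiling/internal/periodic_sampler.h"

namespace {

struct AllocTag {};  // the tag type gives this sampler its own global period

void MaybeRecordAllocation(size_t bytes) {
  // One sampler per thread; Sample() returns true roughly once per 1024 calls.
  thread_local absl::profiling_internal::PeriodicSampler<AllocTag, 1024>
      sampler;
  if (sampler.Sample()) {
    static_cast<void>(bytes);  // a real profiler would record `bytes` here
  }
}

}  // namespace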
diff --git a/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc b/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc
similarity index 94%
rename from abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc
rename to abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc
index 5ad469c..8f0e557 100644
--- a/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc
+++ b/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "absl/profiling/internal/periodic_sampler.h"
 #include "benchmark/benchmark.h"
-#include "absl/base/internal/periodic_sampler.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
 namespace {
 
 template <typename Sampler>
@@ -74,6 +74,6 @@
 BENCHMARK(BM_PeriodicSampler_Disabled);
 
 }  // namespace
-}  // namespace base_internal
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/base/internal/periodic_sampler_test.cc b/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc
similarity index 97%
rename from abseil-cpp/absl/base/internal/periodic_sampler_test.cc
rename to abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc
index 3b301e3..ef986f3 100644
--- a/abseil-cpp/absl/base/internal/periodic_sampler_test.cc
+++ b/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/base/internal/periodic_sampler.h"
+#include "absl/profiling/internal/periodic_sampler.h"
 
 #include <thread>  // NOLINT(build/c++11)
 
@@ -23,7 +23,7 @@
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace profiling_internal {
 namespace {
 
 using testing::Eq;
@@ -172,6 +172,6 @@
 }
 
 }  // namespace
-}  // namespace base_internal
+}  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/profiling/internal/sample_recorder.h b/abseil-cpp/absl/profiling/internal/sample_recorder.h
new file mode 100644
index 0000000..371f6c4
--- /dev/null
+++ b/abseil-cpp/absl/profiling/internal/sample_recorder.h
@@ -0,0 +1,253 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: sample_recorder.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a lock-free linked list for recording samples
+// collected from a random/stochastic process.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
+#define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
+
+#include <atomic>
+#include <cstddef>
+#include <functional>
+
+#include "absl/base/config.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+// Sample<T> provides the members required for linking samples in the list
+// maintained by the SampleRecorder.  Type T defines the sampled data.
+template <typename T>
+struct Sample {
+  // Guards the ability to restore the sample to a pristine state.  This
+  // prevents races with sampling and resurrecting an object.
+  absl::Mutex init_mu;
+  T* next = nullptr;
+  T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
+  int64_t weight;  // How many sampling events were required to sample this one.
+};
+
+// Holds registered samples, with a soft limit on their number that can be
+// adjusted with `SetMaxSamples()`.
+//
+// Thread safe.
+template <typename T>
+class SampleRecorder {
+ public:
+  SampleRecorder();
+  ~SampleRecorder();
+
+  // Registers for sampling.  Returns an opaque registration info.
+  template <typename... Targs>
+  T* Register(Targs&&... args);
+
+  // Unregisters the sample.
+  void Unregister(T* sample);
+
+  // The dispose callback will be called on all samples the moment they are
+  // being unregistered. Only affects samples that are unregistered after the
+  // callback has been set.
+  // Returns the previous callback.
+  using DisposeCallback = void (*)(const T&);
+  DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+  // Iterates over all the registered samples that are still live, calling `f`
+  // on each.  Returns the number of samples that have been dropped.
+  int64_t Iterate(const std::function<void(const T& stack)>& f);
+
+  size_t GetMaxSamples() const;
+  void SetMaxSamples(size_t max);
+
+ private:
+  void PushNew(T* sample);
+  void PushDead(T* sample);
+  template <typename... Targs>
+  T* PopDead(Targs... args);
+
+  std::atomic<size_t> dropped_samples_;
+  std::atomic<size_t> size_estimate_;
+  std::atomic<size_t> max_samples_{1 << 20};
+
+  // Intrusive lock free linked lists for tracking samples.
+  //
+  // `all_` records all samples (they are never removed from this list) and is
+  // terminated with a `nullptr`.
+  //
+  // `graveyard_.dead` is a circular linked list.  When it is empty,
+  // `graveyard_.dead == &graveyard_`.  The list is circular so that
+  // every item on it (even the last) has a non-null dead pointer.  This allows
+  // `Iterate` to determine if a given sample is live or dead using only
+  // information on the sample itself.
+  //
+  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+  // looks like this (G is the Graveyard):
+  //
+  //           +---+    +---+    +---+    +---+    +---+
+  //    all -->| A |--->| B |--->| C |--->| D |--->| E |
+  //           |   |    |   |    |   |    |   |    |   |
+  //   +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
+  //   | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
+  //   |   |         |        |        |        |
+  //   |   | --------+        +--------+        |
+  //   +---+                                    |
+  //     ^                                      |
+  //     +--------------------------------------+
+  //
+  std::atomic<T*> all_;
+  T graveyard_;
+
+  std::atomic<DisposeCallback> dispose_;
+};
+
+template <typename T>
+typename SampleRecorder<T>::DisposeCallback
+SampleRecorder<T>::SetDisposeCallback(DisposeCallback f) {
+  return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+template <typename T>
+SampleRecorder<T>::SampleRecorder()
+    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+  absl::MutexLock l(&graveyard_.init_mu);
+  graveyard_.dead = &graveyard_;
+}
+
+template <typename T>
+SampleRecorder<T>::~SampleRecorder() {
+  T* s = all_.load(std::memory_order_acquire);
+  while (s != nullptr) {
+    T* next = s->next;
+    delete s;
+    s = next;
+  }
+}
+
+template <typename T>
+void SampleRecorder<T>::PushNew(T* sample) {
+  sample->next = all_.load(std::memory_order_relaxed);
+  while (!all_.compare_exchange_weak(sample->next, sample,
+                                     std::memory_order_release,
+                                     std::memory_order_relaxed)) {
+  }
+}
+
+template <typename T>
+void SampleRecorder<T>::PushDead(T* sample) {
+  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+    dispose(*sample);
+  }
+
+  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+  absl::MutexLock sample_lock(&sample->init_mu);
+  sample->dead = graveyard_.dead;
+  graveyard_.dead = sample;
+}
+
+template <typename T>
+template <typename... Targs>
+T* SampleRecorder<T>::PopDead(Targs... args) {
+  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+  // The list is circular, so eventually it collapses down to
+  //   graveyard_.dead == &graveyard_
+  // when it is empty.
+  T* sample = graveyard_.dead;
+  if (sample == &graveyard_) return nullptr;
+
+  absl::MutexLock sample_lock(&sample->init_mu);
+  graveyard_.dead = sample->dead;
+  sample->dead = nullptr;
+  sample->PrepareForSampling(std::forward<Targs>(args)...);
+  return sample;
+}
+
+template <typename T>
+template <typename... Targs>
+T* SampleRecorder<T>::Register(Targs&&... args) {
+  size_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+  if (size > max_samples_.load(std::memory_order_relaxed)) {
+    size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+    dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+    return nullptr;
+  }
+
+  T* sample = PopDead(args...);
+  if (sample == nullptr) {
+    // Resurrection failed.  Hire a new warlock.
+    sample = new T();
+    {
+      absl::MutexLock sample_lock(&sample->init_mu);
+      // If flag initialization happens to occur (perhaps in another thread)
+      // while in this block, it will lock `graveyard_` which is usually always
+      // locked before any sample. This will appear as a lock inversion.
+      // However, this code is run exactly once per sample, and this sample
+      // cannot be accessed until after it is returned from this method.  This
+      // means that this lock state can never be recreated, so we can safely
+      // inform the deadlock detector to ignore it.
+      sample->init_mu.ForgetDeadlockInfo();
+      sample->PrepareForSampling(std::forward<Targs>(args)...);
+    }
+    PushNew(sample);
+  }
+
+  return sample;
+}
+
+template <typename T>
+void SampleRecorder<T>::Unregister(T* sample) {
+  PushDead(sample);
+  size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+template <typename T>
+int64_t SampleRecorder<T>::Iterate(
+    const std::function<void(const T& stack)>& f) {
+  T* s = all_.load(std::memory_order_acquire);
+  while (s != nullptr) {
+    absl::MutexLock l(&s->init_mu);
+    if (s->dead == nullptr) {
+      f(*s);
+    }
+    s = s->next;
+  }
+
+  return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+template <typename T>
+void SampleRecorder<T>::SetMaxSamples(size_t max) {
+  max_samples_.store(max, std::memory_order_release);
+}
+
+template <typename T>
+size_t SampleRecorder<T>::GetMaxSamples() const {
+  return max_samples_.load(std::memory_order_acquire);
+}
+
+}  // namespace profiling_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
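// Illustrative sketch (not part of the patch): the minimum a sampled type needs
// in order to work with SampleRecorder -- derive from Sample<T> and provide
// PrepareForSampling() -- plus an aggregation pass over the live samples.
// AllocSample, `bytes` and TotalSampledBytes are invented for this example.
#include <atomic>
#include <cstddef>
#include <cstdint>

#include "absl/profiling/internal/sample_recorder.h"

namespace {

struct AllocSample : absl::profiling_internal::Sample<AllocSample> {
  // Called by Register()/PopDead() with the arguments passed to Register().
  void PrepareForSampling(int64_t w) {
    weight = w;  // `weight` is inherited from Sample<AllocSample>
    bytes.store(0, std::memory_order_relaxed);
  }
  std::atomic<size_t> bytes{0};
};

size_t TotalSampledBytes(
    absl::profiling_internal::SampleRecorder<AllocSample>* recorder) {
  size_t total = 0;
  // Iterate() visits only live samples; its return value (the dropped-sample
  // count) is ignored here.
  recorder->Iterate([&](const AllocSample& s) {
    total += s.bytes.load(std::memory_order_relaxed);
  });
  return total;
}

// Typical round trip:
//   auto* s = recorder->Register(/*weight=*/1);  ...  recorder->Unregister(s);

}  // namespace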
diff --git a/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc b/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc
new file mode 100644
index 0000000..3373329
--- /dev/null
+++ b/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc
@@ -0,0 +1,184 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/profiling/internal/sample_recorder.h"
+
+#include <atomic>
+#include <random>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+namespace {
+using ::absl::synchronization_internal::ThreadPool;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+struct Info : public Sample<Info> {
+ public:
+  void PrepareForSampling(int64_t w) { weight = w; }
+  std::atomic<size_t> size;
+  absl::Time create_time;
+};
+
+std::vector<size_t> GetSizes(SampleRecorder<Info>* s) {
+  std::vector<size_t> res;
+  s->Iterate([&](const Info& info) {
+    res.push_back(info.size.load(std::memory_order_acquire));
+  });
+  return res;
+}
+
+std::vector<int64_t> GetWeights(SampleRecorder<Info>* s) {
+  std::vector<int64_t> res;
+  s->Iterate([&](const Info& info) { res.push_back(info.weight); });
+  return res;
+}
+
+Info* Register(SampleRecorder<Info>* s, int64_t weight, size_t size) {
+  auto* info = s->Register(weight);
+  assert(info != nullptr);
+  info->size.store(size);
+  return info;
+}
+
+TEST(SampleRecorderTest, Registration) {
+  SampleRecorder<Info> sampler;
+  auto* info1 = Register(&sampler, 31, 1);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31));
+
+  auto* info2 = Register(&sampler, 32, 2);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
+  info1->size.store(3);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31, 32));
+
+  sampler.Unregister(info1);
+  sampler.Unregister(info2);
+}
+
+TEST(SampleRecorderTest, Unregistration) {
+  SampleRecorder<Info> sampler;
+  std::vector<Info*> infos;
+  for (size_t i = 0; i < 3; ++i) {
+    infos.push_back(Register(&sampler, 33 + i, i));
+  }
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 34, 35));
+
+  sampler.Unregister(infos[1]);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35));
+
+  infos.push_back(Register(&sampler, 36, 3));
+  infos.push_back(Register(&sampler, 37, 4));
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 36, 37));
+  sampler.Unregister(infos[3]);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 37));
+
+  sampler.Unregister(infos[0]);
+  sampler.Unregister(infos[2]);
+  sampler.Unregister(infos[4]);
+  EXPECT_THAT(GetSizes(&sampler), IsEmpty());
+}
+
+TEST(SampleRecorderTest, MultiThreaded) {
+  SampleRecorder<Info> sampler;
+  Notification stop;
+  ThreadPool pool(10);
+
+  for (int i = 0; i < 10; ++i) {
+    pool.Schedule([&sampler, &stop, i]() {
+      std::random_device rd;
+      std::mt19937 gen(rd());
+
+      std::vector<Info*> infoz;
+      while (!stop.HasBeenNotified()) {
+        if (infoz.empty()) {
+          infoz.push_back(sampler.Register(i));
+        }
+        switch (std::uniform_int_distribution<>(0, 2)(gen)) {
+          case 0: {
+            infoz.push_back(sampler.Register(i));
+            break;
+          }
+          case 1: {
+            size_t p =
+                std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
+            Info* info = infoz[p];
+            infoz[p] = infoz.back();
+            infoz.pop_back();
+            EXPECT_EQ(info->weight, i);
+            sampler.Unregister(info);
+            break;
+          }
+          case 2: {
+            absl::Duration oldest = absl::ZeroDuration();
+            sampler.Iterate([&](const Info& info) {
+              oldest = std::max(oldest, absl::Now() - info.create_time);
+            });
+            ASSERT_GE(oldest, absl::ZeroDuration());
+            break;
+          }
+        }
+      }
+    });
+  }
+  // The threads will hammer away.  Give it a little bit of time for tsan to
+  // spot errors.
+  absl::SleepFor(absl::Seconds(3));
+  stop.Notify();
+}
+
+TEST(SampleRecorderTest, Callback) {
+  SampleRecorder<Info> sampler;
+
+  auto* info1 = Register(&sampler, 39, 1);
+  auto* info2 = Register(&sampler, 40, 2);
+
+  static const Info* expected;
+
+  auto callback = [](const Info& info) {
+    // We can't use `info` outside of this callback because the object will be
+    // disposed as soon as we return from here.
+    EXPECT_EQ(&info, expected);
+  };
+
+  // Set the callback.
+  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+  expected = info1;
+  sampler.Unregister(info1);
+
+  // Unset the callback.
+  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+  expected = nullptr;  // no more calls.
+  sampler.Unregister(info2);
+}
+
+}  // namespace
+}  // namespace profiling_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/random/BUILD.bazel b/abseil-cpp/absl/random/BUILD.bazel
index 81e150e..19130ff 100644
--- a/abseil-cpp/absl/random/BUILD.bazel
+++ b/abseil-cpp/absl/random/BUILD.bazel
@@ -16,7 +16,6 @@
 
 # ABSL random-number generation libraries.
 
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -69,6 +68,7 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/random/internal:distribution_caller",
         "//absl/random/internal:fast_uniform_bits",
         "//absl/random/internal:fastmath",
@@ -100,8 +100,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":seed_gen_exception",
-        "//absl/container:inlined_vector",
-        "//absl/random/internal:nonsecure_base",
+        "//absl/base:config",
         "//absl/random/internal:pool_urbg",
         "//absl/random/internal:salted_seed_seq",
         "//absl/random/internal:seed_material",
@@ -128,6 +127,7 @@
     name = "mock_distributions",
     testonly = 1,
     hdrs = ["mock_distributions.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":distributions",
         ":mocking_bit_gen",
@@ -183,10 +183,14 @@
     copts = ABSL_TEST_COPTS,
     flaky = 1,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":distributions",
         ":random",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/numeric:representation",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -234,10 +238,13 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",  # Does not converge on WASM.
+    ],
     deps = [
         ":distributions",
         ":random",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -258,7 +265,7 @@
     deps = [
         ":distributions",
         ":random",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -285,8 +292,8 @@
         ":distributions",
         ":random",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
         "//absl/container:flat_hash_map",
+        "//absl/log",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -299,6 +306,7 @@
 cc_test(
     name = "exponential_distribution_test",
     size = "small",
+    timeout = "moderate",
     srcs = ["exponential_distribution_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -306,7 +314,8 @@
         ":distributions",
         ":random",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/numeric:representation",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -329,7 +338,8 @@
         ":distributions",
         ":random",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/numeric:representation",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:sequence_urbg",
         "//absl/strings",
@@ -350,7 +360,7 @@
     deps = [
         ":distributions",
         ":random",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -375,7 +385,8 @@
     deps = [
         ":distributions",
         ":random",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/numeric:representation",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -395,7 +406,7 @@
     deps = [
         ":distributions",
         ":random",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/random/internal:distribution_test_util",
         "//absl/random/internal:pcg_engine",
         "//absl/random/internal:sequence_urbg",
@@ -425,6 +436,9 @@
     srcs = ["mocking_bit_gen_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":bit_gen_ref",
         ":mock_distributions",
@@ -440,6 +454,9 @@
     srcs = ["mock_distributions_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":mock_distributions",
         ":mocking_bit_gen",
@@ -454,6 +471,9 @@
     srcs = ["examples_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":random",
         "@com_google_googletest//:gtest_main",
@@ -474,18 +494,6 @@
     ],
 )
 
-BENCHMARK_TAGS = [
-    "benchmark",
-    "no_test_android_arm",
-    "no_test_android_arm64",
-    "no_test_android_x86",
-    "no_test_darwin_x86_64",
-    "no_test_ios_x86_64",
-    "no_test_loonix",
-    "no_test_msvc_x64",
-    "no_test_wasm",
-]
-
 # Benchmarks for various methods / test utilities
 cc_binary(
     name = "benchmarks",
@@ -495,7 +503,7 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = BENCHMARK_TAGS,
+    tags = ["benchmark"],
     deps = [
         ":distributions",
         ":random",
diff --git a/abseil-cpp/absl/random/CMakeLists.txt b/abseil-cpp/absl/random/CMakeLists.txt
index 7d7bec8..bd363d8 100644
--- a/abseil-cpp/absl/random/CMakeLists.txt
+++ b/abseil-cpp/absl/random/CMakeLists.txt
@@ -62,8 +62,8 @@
     absl::random_random
     absl::random_internal_sequence_urbg
     absl::fast_type_id
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -119,8 +119,9 @@
     absl::type_traits
     absl::utility
     absl::variant
-    gmock
-    gtest
+    GTest::gmock
+    GTest::gtest
+  PUBLIC
   TESTONLY
 )
 
@@ -136,8 +137,8 @@
   DEPS
     absl::random_mocking_bit_gen
     absl::random_random
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -153,8 +154,8 @@
     absl::random_bit_gen_ref
     absl::random_mocking_bit_gen
     absl::random_random
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_library(
@@ -222,8 +223,8 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::config
     absl::inlined_vector
-    absl::random_internal_nonsecure_base
     absl::random_internal_pool_urbg
     absl::random_internal_salted_seed_seq
     absl::random_internal_seed_material
@@ -245,8 +246,8 @@
     absl::random_random
     absl::random_internal_sequence_urbg
     absl::random_internal_pcg_engine
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -259,16 +260,17 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
+    absl::numeric_representation
     absl::random_distributions
     absl::random_random
     absl::random_internal_distribution_test_util
     absl::random_internal_sequence_urbg
     absl::random_internal_pcg_engine
-    absl::raw_logging_internal
     absl::strings
     absl::str_format
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -284,8 +286,8 @@
     absl::random_distributions
     absl::random_random
     absl::random_internal_distribution_test_util
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -297,11 +299,12 @@
     ${ABSL_TEST_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
     absl::random_distributions
     absl::random_random
     absl::raw_logging_internal
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -313,16 +316,17 @@
     ${ABSL_TEST_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::log
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
     absl::random_random
-    absl::raw_logging_internal
     absl::strings
     absl::str_format
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -335,15 +339,15 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
     absl::random_random
-    absl::raw_logging_internal
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -360,14 +364,14 @@
     absl::random_random
     absl::core_headers
     absl::flat_hash_map
+    absl::log
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
-    absl::raw_logging_internal
     absl::strings
     absl::str_format
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -381,16 +385,17 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::core_headers
+    absl::log
+    absl::numeric_representation
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
     absl::random_random
-    absl::raw_logging_internal
     absl::strings
     absl::str_format
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -404,15 +409,16 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::core_headers
+    absl::log
+    absl::numeric_representation
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_sequence_urbg
     absl::random_random
-    absl::raw_logging_internal
     absl::strings
     absl::str_format
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -425,15 +431,15 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
     absl::random_random
-    absl::raw_logging_internal
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -446,14 +452,16 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
+    absl::numeric_representation
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
     absl::random_random
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -466,15 +474,15 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
     absl::random_distributions
     absl::random_internal_distribution_test_util
     absl::random_internal_pcg_engine
     absl::random_internal_sequence_urbg
     absl::random_random
-    absl::raw_logging_internal
     absl::strings
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -488,7 +496,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_random
-    gtest_main
+    GTest::gtest_main
 )
 
 absl_cc_test(
@@ -504,8 +512,8 @@
     absl::random_seed_sequences
     absl::random_internal_nonsecure_base
     absl::random_random
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -564,7 +572,7 @@
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
-    $<$<BOOL:${MINGW}>:"bcrypt">
+    $<$<BOOL:${MINGW}>:-lbcrypt>
   DEPS
     absl::core_headers
     absl::optional
@@ -611,6 +619,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::config
+    absl::endian
   TESTONLY
 )
 
@@ -721,7 +730,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::core_headers
-    absl::optional
+    absl::inlined_vector
     absl::random_internal_pool_urbg
     absl::random_internal_salted_seed_seq
     absl::random_internal_seed_material
@@ -758,6 +767,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::endian
     absl::random_internal_iostream_state_saver
     absl::random_internal_randen
     absl::raw_logging_internal
@@ -888,7 +898,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_traits
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -905,7 +915,7 @@
     absl::bits
     absl::flags
     absl::random_internal_generate_real
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -920,7 +930,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_distribution_test_util
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -935,7 +945,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_fastmath
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -951,8 +961,8 @@
   DEPS
     absl::random_internal_explicit_seed_seq
     absl::random_seed_sequences
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -967,8 +977,8 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_salted_seed_seq
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -984,7 +994,7 @@
   DEPS
     absl::core_headers
     absl::random_internal_distribution_test_util
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -999,7 +1009,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_fast_uniform_bits
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1018,7 +1028,7 @@
     absl::random_distributions
     absl::random_seed_sequences
     absl::strings
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1033,8 +1043,8 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_seed_material
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1051,7 +1061,7 @@
     absl::random_internal_pool_urbg
     absl::span
     absl::type_traits
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1068,8 +1078,8 @@
     absl::random_internal_explicit_seed_seq
     absl::random_internal_pcg_engine
     absl::time
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1083,13 +1093,13 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
     absl::random_internal_explicit_seed_seq
     absl::random_internal_randen_engine
-    absl::raw_logging_internal
     absl::strings
     absl::time
-    gmock
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1105,7 +1115,7 @@
   DEPS
     absl::random_internal_randen
     absl::type_traits
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1119,8 +1129,9 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::endian
     absl::random_internal_randen_slow
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1134,13 +1145,13 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::log
     absl::random_internal_platform
     absl::random_internal_randen_hwaes
     absl::random_internal_randen_hwaes_impl
-    absl::raw_logging_internal
     absl::str_format
-    gmock
-    gtest
+    GTest::gmock
+    GTest::gtest
 )
 
 # Internal-only target, do not depend on directly.
@@ -1171,7 +1182,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_uniform_helper
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1186,7 +1197,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::random_internal_iostream_state_saver
-    gtest_main
+    GTest::gtest_main
 )
 
 # Internal-only target, do not depend on directly.
@@ -1203,5 +1214,6 @@
     absl::random_internal_wide_multiply
     absl::bits
     absl::int128
-    gtest_main
+    GTest::gmock
+    GTest::gtest_main
 )
diff --git a/abseil-cpp/absl/random/benchmarks.cc b/abseil-cpp/absl/random/benchmarks.cc
index 87bbb98..0900e81 100644
--- a/abseil-cpp/absl/random/benchmarks.cc
+++ b/abseil-cpp/absl/random/benchmarks.cc
@@ -62,7 +62,7 @@
  public:
   using result_type = uint32_t;
 
-  PrecompiledSeedSeq() {}
+  PrecompiledSeedSeq() = default;
 
   template <typename Iterator>
   PrecompiledSeedSeq(Iterator begin, Iterator end) {}
diff --git a/abseil-cpp/absl/random/bernoulli_distribution.h b/abseil-cpp/absl/random/bernoulli_distribution.h
index 25bd0d5..d81b6ae 100644
--- a/abseil-cpp/absl/random/bernoulli_distribution.h
+++ b/abseil-cpp/absl/random/bernoulli_distribution.h
@@ -138,16 +138,16 @@
     // 64 bits.
     //
     // Second, `c` is constructed by first casting explicitly to a signed
-    // integer and then converting implicitly to an unsigned integer of the same
+    // integer and then casting explicitly to an unsigned integer of the same
     // size.  This is done because the hardware conversion instructions produce
     // signed integers from double; if taken as a uint64_t the conversion would
     // be wrong for doubles greater than 2^63 (not relevant in this use-case).
     // If converted directly to an unsigned integer, the compiler would end up
     // emitting code to handle such large values that are not relevant due to
     // the known bounds on `c`.  To avoid these extra instructions this
-    // implementation converts first to the signed type and then use the
-    // implicit conversion to unsigned (which is a no-op).
-    const uint64_t c = static_cast<int64_t>(p * kP32);
+    // implementation converts first to the signed type and then converts to
+    // unsigned (which is a no-op).
+    const uint64_t c = static_cast<uint64_t>(static_cast<int64_t>(p * kP32));
     const uint32_t v = fast_u32(g);
     // FAST PATH: this path fails with probability 1/2^32.  Note that simply
     // returning v <= c would approximate P very well (up to an absolute error
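// Illustrative sketch (not part of the patch): what the double -> int64_t ->
// uint64_t conversion above computes.  The fast path compares a 32-bit draw v
// against a threshold c = p * 2^32 held in 64 bits; kP32 and the sample values
// below are assumptions for this sketch, mirroring the constant the header uses.
#include <cassert>
#include <cstdint>

namespace {

constexpr double kP32 = 4294967296.0;  // 2^32 as a double

// Returns the fast-path acceptance threshold for a probability p in [0, 1].
uint64_t BernoulliThreshold(double p) {
  // double -> int64_t uses the cheap hardware conversion; the result fits
  // because 0 <= p * 2^32 <= 2^32 < 2^63.  The outer cast to uint64_t is free.
  return static_cast<uint64_t>(static_cast<int64_t>(p * kP32));
}

void CheckThresholds() {
  assert(BernoulliThreshold(0.0) == 0);
  assert(BernoulliThreshold(0.5) == (uint64_t{1} << 31));
  assert(BernoulliThreshold(1.0) == (uint64_t{1} << 32));
}

}  // namespace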
diff --git a/abseil-cpp/absl/random/beta_distribution_test.cc b/abseil-cpp/absl/random/beta_distribution_test.cc
index 277e4dc..c93b2a3 100644
--- a/abseil-cpp/absl/random/beta_distribution_test.cc
+++ b/abseil-cpp/absl/random/beta_distribution_test.cc
@@ -15,18 +15,21 @@
 #include "absl/random/beta_distribution.h"
 
 #include <algorithm>
+#include <cfloat>
 #include <cstddef>
 #include <cstdint>
 #include <iterator>
 #include <random>
 #include <sstream>
 #include <string>
+#include <type_traits>
 #include <unordered_map>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
+#include "absl/numeric/internal/representation.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -42,8 +45,26 @@
 template <typename IntType>
 class BetaDistributionInterfaceTest : public ::testing::Test {};
 
-using RealTypes = ::testing::Types<float, double, long double>;
-TYPED_TEST_CASE(BetaDistributionInterfaceTest, RealTypes);
+constexpr bool ShouldExerciseLongDoubleTests() {
+  // long double arithmetic is not supported well by either GCC or Clang on
+  // most platforms, specifically when long double is implemented in terms of
+  // double-double; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
+  // https://bugs.llvm.org/show_bug.cgi?id=49131, and
+  // https://bugs.llvm.org/show_bug.cgi?id=49132.
+  // So a conservative choice here is to disable the long-double tests pretty
+  // much everywhere, except on x64, and only when long double is not
+  // implemented as double-double.
+#if defined(__i686__) && defined(__x86_64__)
+  return !absl::numeric_internal::IsDoubleDouble();
+#else
+  return false;
+#endif
+}
+
+using RealTypes = std::conditional<ShouldExerciseLongDoubleTests(),
+                                   ::testing::Types<float, double, long double>,
+                                   ::testing::Types<float, double>>::type;
+TYPED_TEST_SUITE(BetaDistributionInterfaceTest, RealTypes);
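// Illustrative sketch (not part of the patch): how the std::conditional above
// selects the typed-test list at compile time -- the first type when the
// predicate is true, the second otherwise.  The aliases here are made up.
#include <type_traits>

using PickedWhenTrue = std::conditional<true, int, long>::type;    // -> int
using PickedWhenFalse = std::conditional<false, int, long>::type;  // -> long
static_assert(std::is_same<PickedWhenTrue, int>::value, "first branch");
static_assert(std::is_same<PickedWhenFalse, long>::value, "second branch");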
 
 TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) {
   // The threshold for whether std::exp(1/a) is finite.
@@ -53,9 +74,6 @@
   const TypeParam kLargeA =
       std::exp(std::log((std::numeric_limits<TypeParam>::max)()) -
                std::log(std::log((std::numeric_limits<TypeParam>::max)())));
-  const TypeParam kLargeAPPC = std::exp(
-      std::log((std::numeric_limits<TypeParam>::max)()) -
-      std::log(std::log((std::numeric_limits<TypeParam>::max)())) - 10.0f);
   using param_type = typename absl::beta_distribution<TypeParam>::param_type;
 
   constexpr int kCount = 1000;
@@ -76,9 +94,6 @@
       kLargeA,                                //
       std::nextafter(kLargeA, TypeParam(0)),  //
       std::nextafter(kLargeA, std::numeric_limits<TypeParam>::max()),
-      kLargeAPPC,  //
-      std::nextafter(kLargeAPPC, TypeParam(0)),
-      std::nextafter(kLargeAPPC, std::numeric_limits<TypeParam>::max()),
       // Boundary cases.
       std::numeric_limits<TypeParam>::max(),
       std::numeric_limits<TypeParam>::epsilon(),
@@ -92,8 +107,8 @@
   };
   for (TypeParam alpha : kValues) {
     for (TypeParam beta : kValues) {
-      ABSL_INTERNAL_LOG(
-          INFO, absl::StrFormat("Smoke test for Beta(%a, %a)", alpha, beta));
+      LOG(INFO) << absl::StreamFormat("Smoke test for Beta(%a, %a)", alpha,
+                                      beta);
 
       param_type param(alpha, beta);
       absl::beta_distribution<TypeParam> before(alpha, beta);
@@ -125,28 +140,6 @@
 
       ss >> after;
 
-#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \
-    defined(__ppc__) || defined(__PPC__)
-      if (std::is_same<TypeParam, long double>::value) {
-        // Roundtripping floating point values requires sufficient precision
-        // to reconstruct the exact value. It turns out that long double
-        // has some errors doing this on ppc.
-        if (alpha <= std::numeric_limits<double>::max() &&
-            alpha >= std::numeric_limits<double>::lowest()) {
-          EXPECT_EQ(static_cast<double>(before.alpha()),
-                    static_cast<double>(after.alpha()))
-              << ss.str();
-        }
-        if (beta <= std::numeric_limits<double>::max() &&
-            beta >= std::numeric_limits<double>::lowest()) {
-          EXPECT_EQ(static_cast<double>(before.beta()),
-                    static_cast<double>(after.beta()))
-              << ss.str();
-        }
-        continue;
-      }
-#endif
-
       EXPECT_EQ(before.alpha(), after.alpha());
       EXPECT_EQ(before.beta(), after.beta());
       EXPECT_EQ(before, after)           //
@@ -334,15 +327,13 @@
       absl::random_internal::Near("z", z_mean, 0.0, max_err) &&
       absl::random_internal::Near("z_variance", z_variance, 0.0, max_err);
   if (!pass) {
-    ABSL_INTERNAL_LOG(
-        INFO,
-        absl::StrFormat(
-            "Beta(%f, %f), "
-            "mean: sample %f, expect %f, which is %f stddevs away, "
-            "variance: sample %f, expect %f, which is %f stddevs away.",
-            alpha_, beta_, m.mean, Mean(),
-            std::abs(m.mean - Mean()) / mean_stddev, m.variance, Variance(),
-            std::abs(m.variance - Variance()) / variance_stddev));
+    LOG(INFO) << "Beta(" << alpha_ << ", " << beta_ << "), mean: sample "
+              << m.mean << ", expect " << Mean() << ", which is "
+              << std::abs(m.mean - Mean()) / mean_stddev
+              << " stddevs away, variance: sample " << m.variance << ", expect "
+              << Variance() << ", which is "
+              << std::abs(m.variance - Variance()) / variance_stddev
+              << " stddevs away.";
   }
   return pass;
 }
@@ -403,18 +394,15 @@
   const bool pass =
       (absl::random_internal::ChiSquarePValue(chi_square, dof) >= p);
   if (!pass) {
-    for (int i = 0; i < cutoffs.size(); i++) {
-      ABSL_INTERNAL_LOG(
-          INFO, absl::StrFormat("cutoff[%d] = %f, actual count %d, expected %d",
-                                i, cutoffs[i], counts[i],
-                                static_cast<int>(expected[i])));
+    for (size_t i = 0; i < cutoffs.size(); i++) {
+      LOG(INFO) << "cutoff[" << i << "] = " << cutoffs[i] << ", actual count "
+                << counts[i] << ", expected " << static_cast<int>(expected[i]);
     }
 
-    ABSL_INTERNAL_LOG(
-        INFO, absl::StrFormat(
-                  "Beta(%f, %f) %s %f, p = %f", alpha_, beta_,
-                  absl::random_internal::kChiSquared, chi_square,
-                  absl::random_internal::ChiSquarePValue(chi_square, dof)));
+    LOG(INFO) << "Beta(" << alpha_ << ", " << beta_ << ") "
+              << absl::random_internal::kChiSquared << " " << chi_square
+              << ", p = "
+              << absl::random_internal::ChiSquarePValue(chi_square, dof);
   }
   return pass;
 }
@@ -448,13 +436,13 @@
   return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
 }
 
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     TestSampleStatisticsCombinations, BetaDistributionTest,
     ::testing::Combine(::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4),
                        ::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4)),
     ParamName);
 
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     TestSampleStatistics_SelectedPairs, BetaDistributionTest,
     ::testing::Values(std::make_pair(0.5, 1000), std::make_pair(1000, 0.5),
                       std::make_pair(900, 1000), std::make_pair(10000, 20000),
@@ -576,6 +564,14 @@
 // dependencies of the distribution change, such as RandU64ToDouble, then this
 // is also likely to change.
 TEST(BetaDistributionTest, AlgorithmBounds) {
+#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
+  // We're using an x87-compatible FPU, and intermediate operations are
+  // performed with 80-bit floats. This produces slightly different results from
+  // what we expect below.
+  GTEST_SKIP()
+      << "Skipping the test because we detected x87 floating-point semantics";
+#endif
+
   {
     absl::random_internal::sequence_urbg urbg(
         {0x7fbe76c8b4395800ull, 0x8000000000000000ull});
diff --git a/abseil-cpp/absl/random/bit_gen_ref.h b/abseil-cpp/absl/random/bit_gen_ref.h
index 9555460..e475221 100644
--- a/abseil-cpp/absl/random/bit_gen_ref.h
+++ b/abseil-cpp/absl/random/bit_gen_ref.h
@@ -24,6 +24,10 @@
 #ifndef ABSL_RANDOM_BIT_GEN_REF_H_
 #define ABSL_RANDOM_BIT_GEN_REF_H_
 
+#include <limits>
+#include <type_traits>
+#include <utility>
+
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/base/macros.h"
 #include "absl/meta/type_traits.h"
diff --git a/abseil-cpp/absl/random/discrete_distribution_test.cc b/abseil-cpp/absl/random/discrete_distribution_test.cc
index 6d00700..32405ea 100644
--- a/abseil-cpp/absl/random/discrete_distribution_test.cc
+++ b/abseil-cpp/absl/random/discrete_distribution_test.cc
@@ -26,7 +26,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -99,6 +99,7 @@
 }
 
 TEST(DiscreteDistributionTest, InitDiscreteDistribution) {
+  using testing::_;
   using testing::Pair;
 
   {
@@ -111,8 +112,8 @@
     // Each bucket is p=1/3, so bucket 0 will send half its traffic
     // to bucket 2, while the rest will retain all of their traffic.
     EXPECT_THAT(q, testing::ElementsAre(Pair(0.5, 2),  //
-                                        Pair(1.0, 1),  //
-                                        Pair(1.0, 2)));
+                                        Pair(1.0, _),  //
+                                        Pair(1.0, _)));
   }
 
   {
@@ -135,7 +136,7 @@
 
     EXPECT_THAT(q, testing::ElementsAre(Pair(b0, 3),   //
                                         Pair(b1, 3),   //
-                                        Pair(1.0, 2),  //
+                                        Pair(1.0, _),  //
                                         Pair(b3, 2),   //
                                         Pair(b1, 3)));
   }
@@ -145,7 +146,7 @@
   using absl::random_internal::kChiSquared;
 
   constexpr size_t kTrials = 10000;
-  constexpr int kBuckets = 50;  // inclusive, so actally +1
+  constexpr int kBuckets = 50;  // inclusive, so actually +1
 
   // 1-in-100000 threshold, but remember, there are about 8 tests
   // in this file. And the test could fail for other reasons.
@@ -193,7 +194,7 @@
     absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
     absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
                     kThreshold);
-    ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+    LOG(INFO) << msg;
     FAIL() << msg;
   }
 }
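// Illustrative sketch (not part of the patch): how an alias table shaped like
// the `q` checked above is consumed -- each entry is (keep_probability,
// alias_index); pick a bucket uniformly, keep it with its probability,
// otherwise return its alias.  SampleAlias and the table layout are
// assumptions for this sketch, not the library's internal API.
#include <cstddef>
#include <random>
#include <utility>
#include <vector>

namespace {

size_t SampleAlias(const std::vector<std::pair<double, size_t>>& q,
                   std::mt19937& gen) {
  std::uniform_int_distribution<size_t> pick_bucket(0, q.size() - 1);
  std::uniform_real_distribution<double> coin(0.0, 1.0);
  const size_t i = pick_bucket(gen);
  return coin(gen) < q[i].first ? i : q[i].second;
}

}  // namespace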
diff --git a/abseil-cpp/absl/random/distributions.h b/abseil-cpp/absl/random/distributions.h
index 31c7969..37fc3aa 100644
--- a/abseil-cpp/absl/random/distributions.h
+++ b/abseil-cpp/absl/random/distributions.h
@@ -373,7 +373,7 @@
 template <typename IntType, typename URBG>
 IntType LogUniform(URBG&& urbg,  // NOLINT(runtime/references)
                    IntType lo, IntType hi, IntType base = 2) {
-  static_assert(std::is_integral<IntType>::value,
+  static_assert(random_internal::IsIntegral<IntType>::value,
                 "Template-argument 'IntType' must be an integral type, in "
                 "absl::LogUniform<IntType, URBG>(...)");
 
@@ -403,7 +403,7 @@
 template <typename IntType, typename URBG>
 IntType Poisson(URBG&& urbg,  // NOLINT(runtime/references)
                 double mean = 1.0) {
-  static_assert(std::is_integral<IntType>::value,
+  static_assert(random_internal::IsIntegral<IntType>::value,
                 "Template-argument 'IntType' must be an integral type, in "
                 "absl::Poisson<IntType, URBG>(...)");
 
@@ -435,7 +435,7 @@
 IntType Zipf(URBG&& urbg,  // NOLINT(runtime/references)
              IntType hi = (std::numeric_limits<IntType>::max)(), double q = 2.0,
              double v = 1.0) {
-  static_assert(std::is_integral<IntType>::value,
+  static_assert(random_internal::IsIntegral<IntType>::value,
                 "Template-argument 'IntType' must be an integral type, in "
                 "absl::Zipf<IntType, URBG>(...)");
 
diff --git a/abseil-cpp/absl/random/distributions_test.cc b/abseil-cpp/absl/random/distributions_test.cc
index 5866a07..5321a11 100644
--- a/abseil-cpp/absl/random/distributions_test.cc
+++ b/abseil-cpp/absl/random/distributions_test.cc
@@ -14,6 +14,7 @@
 
 #include "absl/random/distributions.h"
 
+#include <cfloat>
 #include <cmath>
 #include <cstdint>
 #include <random>
@@ -219,11 +220,21 @@
   absl::Uniform<uint16_t>(gen);
   absl::Uniform<uint32_t>(gen);
   absl::Uniform<uint64_t>(gen);
+  absl::Uniform<absl::uint128>(gen);
 }
 
 TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {
   // The ranges used in this test are undefined behavior.
   // The results are arbitrary and subject to future changes.
+
+#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
+  // We're using an x87-compatible FPU, and intermediate operations can be
+  // performed with 80-bit floats. This produces slightly different results from
+  // what we expect below.
+  GTEST_SKIP()
+      << "Skipping the test because we detected x87 floating-point semantics";
+#endif
+
   absl::InsecureBitGen gen;
 
   // <uint>
diff --git a/abseil-cpp/absl/random/exponential_distribution_test.cc b/abseil-cpp/absl/random/exponential_distribution_test.cc
index 8e9e69b..fb9a0d1 100644
--- a/abseil-cpp/absl/random/exponential_distribution_test.cc
+++ b/abseil-cpp/absl/random/exponential_distribution_test.cc
@@ -15,6 +15,7 @@
 #include "absl/random/exponential_distribution.h"
 
 #include <algorithm>
+#include <cfloat>
 #include <cmath>
 #include <cstddef>
 #include <cstdint>
@@ -28,8 +29,9 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
+#include "absl/log/log.h"
+#include "absl/numeric/internal/representation.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -47,12 +49,16 @@
 template <typename RealType>
 class ExponentialDistributionTypedTest : public ::testing::Test {};
 
-#if defined(__EMSCRIPTEN__)
-using RealTypes = ::testing::Types<float, double>;
-#else
-using RealTypes = ::testing::Types<float, double, long double>;
-#endif  // defined(__EMSCRIPTEN__)
-TYPED_TEST_CASE(ExponentialDistributionTypedTest, RealTypes);
+// double-double arithmetic is not supported well by either GCC or Clang; see
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
+// https://bugs.llvm.org/show_bug.cgi?id=49131, and
+// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests
+// with double doubles until compiler support is better.
+using RealTypes =
+    std::conditional<absl::numeric_internal::IsDoubleDouble(),
+                     ::testing::Types<float, double>,
+                     ::testing::Types<float, double, long double>>::type;
+TYPED_TEST_SUITE(ExponentialDistributionTypedTest, RealTypes);
 
 TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) {
   using param_type =
@@ -109,9 +115,8 @@
       if (sample < sample_min) sample_min = sample;
     }
     if (!std::is_same<TypeParam, long double>::value) {
-      ABSL_INTERNAL_LOG(INFO,
-                        absl::StrFormat("Range {%f}: %f, %f, lambda=%f", lambda,
-                                        sample_min, sample_max, lambda));
+      LOG(INFO) << "Range {" << lambda << "}: " << sample_min << ", "
+                << sample_max << ", lambda=" << lambda;
     }
 
     std::stringstream ss;
@@ -130,23 +135,6 @@
 
     ss >> after;
 
-#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \
-    defined(__ppc__) || defined(__PPC__)
-    if (std::is_same<TypeParam, long double>::value) {
-      // Roundtripping floating point values requires sufficient precision to
-      // reconstruct the exact value. It turns out that long double has some
-      // errors doing this on ppc, particularly for values
-      // near {1.0 +/- epsilon}.
-      if (lambda <= std::numeric_limits<double>::max() &&
-          lambda >= std::numeric_limits<double>::lowest()) {
-        EXPECT_EQ(static_cast<double>(before.lambda()),
-                  static_cast<double>(after.lambda()))
-            << ss.str();
-      }
-      continue;
-    }
-#endif
-
     EXPECT_EQ(before.lambda(), after.lambda())  //
         << ss.str() << " "                      //
         << (ss.good() ? "good " : "")           //
@@ -230,17 +218,16 @@
   const bool pass = absl::random_internal::Near("z", z, 0.0, max_err);
 
   if (!pass) {
-    ABSL_INTERNAL_LOG(
-        INFO, absl::StrFormat("p=%f max_err=%f\n"
-                              " lambda=%f\n"
-                              " mean=%f vs. %f\n"
-                              " stddev=%f vs. %f\n"
-                              " skewness=%f vs. %f\n"
-                              " kurtosis=%f vs. %f\n"
-                              " z=%f vs. 0",
-                              p, max_err, lambda(), m.mean, mean(),
-                              std::sqrt(m.variance), stddev(), m.skewness,
-                              skew(), m.kurtosis, kurtosis(), z));
+    // clang-format off
+    LOG(INFO)
+        << "p=" << p << " max_err=" << max_err << "\n"
+           " lambda=" << lambda() << "\n"
+           " mean=" << m.mean << " vs. " << mean() << "\n"
+           " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
+           " skewness=" << m.skewness << " vs. " << skew() << "\n"
+           " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
+           " z=" << z << " vs. 0";
+    // clang-format on
   }
   return pass;
 }
@@ -285,16 +272,16 @@
   double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
 
   if (chi_square > threshold) {
-    for (int i = 0; i < cutoffs.size(); i++) {
-      ABSL_INTERNAL_LOG(
-          INFO, absl::StrFormat("%d : (%f) = %d", i, cutoffs[i], counts[i]));
+    for (size_t i = 0; i < cutoffs.size(); i++) {
+      LOG(INFO) << i << " : (" << cutoffs[i] << ") = " << counts[i];
     }
 
-    ABSL_INTERNAL_LOG(INFO,
-                      absl::StrCat("lambda ", lambda(), "\n",     //
-                                   " expected ", expected, "\n",  //
-                                   kChiSquared, " ", chi_square, " (", p, ")\n",
-                                   kChiSquared, " @ 0.98 = ", threshold));
+    // clang-format off
+    LOG(INFO) << "lambda " << lambda() << "\n"
+                 " expected " << expected << "\n"
+              << kChiSquared << " " << chi_square << " (" << p << ")\n"
+              << kChiSquared << " @ 0.98 = " << threshold;
+    // clang-format on
   }
   return p;
 }
@@ -354,8 +341,8 @@
   return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
 }
 
-INSTANTIATE_TEST_CASE_P(All, ExponentialDistributionTests,
-                        ::testing::ValuesIn(GenParams()), ParamName);
+INSTANTIATE_TEST_SUITE_P(All, ExponentialDistributionTests,
+                         ::testing::ValuesIn(GenParams()), ParamName);
 
 // NOTE: absl::exponential_distribution is not guaranteed to be stable.
 TEST(ExponentialDistributionTest, StabilityTest) {
@@ -396,6 +383,15 @@
 TEST(ExponentialDistributionTest, AlgorithmBounds) {
   // Relies on absl::uniform_real_distribution, so some of these comments
   // reference that.
+
+#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
+  // We're using an x87-compatible FPU, and intermediate operations can be
+  // performed with 80-bit floats. This produces slightly different results from
+  // what we expect below.
+  GTEST_SKIP()
+      << "Skipping the test because we detected x87 floating-point semantics";
+#endif
+
   absl::exponential_distribution<double> dist;
 
   {
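The RealTypes alias introduced above selects the gtest type list at compile time. A sketch of the same pattern with a stand-in predicate (illustrative only; LongDoubleIsDoubleDouble and MyRealTypes are made-up names, and testing digits == 106 is an assumption about how the double-double format presents itself):

    #include <limits>
    #include <type_traits>

    #include "gtest/gtest.h"

    // Stand-in for absl::numeric_internal::IsDoubleDouble(): true when long
    // double is the 106-digit double-double format (the ppc64 default ABI).
    constexpr bool LongDoubleIsDoubleDouble() {
      return std::numeric_limits<long double>::digits == 106;
    }

    // The type list shrinks at compile time when long double is double-double.
    using MyRealTypes =
        std::conditional<LongDoubleIsDoubleDouble(),
                         ::testing::Types<float, double>,
                         ::testing::Types<float, double, long double>>::type;
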
diff --git a/abseil-cpp/absl/random/gaussian_distribution_test.cc b/abseil-cpp/absl/random/gaussian_distribution_test.cc
index 02ac578..bad3476 100644
--- a/abseil-cpp/absl/random/gaussian_distribution_test.cc
+++ b/abseil-cpp/absl/random/gaussian_distribution_test.cc
@@ -21,12 +21,14 @@
 #include <iterator>
 #include <random>
 #include <string>
+#include <type_traits>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
+#include "absl/log/log.h"
+#include "absl/numeric/internal/representation.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/sequence_urbg.h"
@@ -43,8 +45,16 @@
 template <typename RealType>
 class GaussianDistributionInterfaceTest : public ::testing::Test {};
 
-using RealTypes = ::testing::Types<float, double, long double>;
-TYPED_TEST_CASE(GaussianDistributionInterfaceTest, RealTypes);
+// double-double arithmetic is not supported well by either GCC or Clang; see
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
+// https://bugs.llvm.org/show_bug.cgi?id=49131, and
+// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests
+// with double doubles until compiler support is better.
+using RealTypes =
+    std::conditional<absl::numeric_internal::IsDoubleDouble(),
+                     ::testing::Types<float, double>,
+                     ::testing::Types<float, double, long double>>::type;
+TYPED_TEST_SUITE(GaussianDistributionInterfaceTest, RealTypes);
 
 TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) {
   using param_type =
@@ -106,9 +116,8 @@
           EXPECT_LE(sample, before.max()) << before;
         }
         if (!std::is_same<TypeParam, long double>::value) {
-          ABSL_INTERNAL_LOG(
-              INFO, absl::StrFormat("Range{%f, %f}: %f, %f", mean, stddev,
-                                    sample_min, sample_max));
+          LOG(INFO) << "Range{" << mean << ", " << stddev << "}: " << sample_min
+                    << ", " << sample_max;
         }
 
         std::stringstream ss;
@@ -129,32 +138,6 @@
 
         ss >> after;
 
-#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \
-    defined(__ppc__) || defined(__PPC__) || defined(__EMSCRIPTEN__)
-        if (std::is_same<TypeParam, long double>::value) {
-          // Roundtripping floating point values requires sufficient precision
-          // to reconstruct the exact value.  It turns out that long double
-          // has some errors doing this on ppc, particularly for values
-          // near {1.0 +/- epsilon}.
-          //
-          // Emscripten is even worse, implementing long double as a 128-bit
-          // type, but shipping with a strtold() that doesn't support that.
-          if (mean <= std::numeric_limits<double>::max() &&
-              mean >= std::numeric_limits<double>::lowest()) {
-            EXPECT_EQ(static_cast<double>(before.mean()),
-                      static_cast<double>(after.mean()))
-                << ss.str();
-          }
-          if (stddev <= std::numeric_limits<double>::max() &&
-              stddev >= std::numeric_limits<double>::lowest()) {
-            EXPECT_EQ(static_cast<double>(before.stddev()),
-                      static_cast<double>(after.stddev()))
-                << ss.str();
-          }
-          continue;
-        }
-#endif
-
         EXPECT_EQ(before.mean(), after.mean());
         EXPECT_EQ(before.stddev(), after.stddev())  //
             << ss.str() << " "                      //
@@ -256,17 +239,16 @@
       (std::pow(m.skewness, 2.0) + std::pow(m.kurtosis - 3.0, 2.0) / 4.0);
 
   if (!pass || jb > 9.21) {
-    ABSL_INTERNAL_LOG(
-        INFO, absl::StrFormat("p=%f max_err=%f\n"
-                              " mean=%f vs. %f\n"
-                              " stddev=%f vs. %f\n"
-                              " skewness=%f vs. %f\n"
-                              " kurtosis=%f vs. %f\n"
-                              " z=%f vs. 0\n"
-                              " jb=%f vs. 9.21",
-                              p, max_err, m.mean, mean(), std::sqrt(m.variance),
-                              stddev(), m.skewness, skew(), m.kurtosis,
-                              kurtosis(), z, jb));
+    // clang-format off
+    LOG(INFO)
+        << "p=" << p << " max_err=" << max_err << "\n"
+           " mean=" << m.mean << " vs. " << mean() << "\n"
+           " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
+           " skewness=" << m.skewness << " vs. " << skew() << "\n"
+           " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
+           " z=" << z << " vs. 0\n"
+           " jb=" << jb << " vs. 9.21";
+    // clang-format on
   }
   return pass;
 }
@@ -313,16 +295,16 @@
 
   // Log if the chi_square value is above the threshold.
   if (chi_square > threshold) {
-    for (int i = 0; i < cutoffs.size(); i++) {
-      ABSL_INTERNAL_LOG(
-          INFO, absl::StrFormat("%d : (%f) = %d", i, cutoffs[i], counts[i]));
+    for (size_t i = 0; i < cutoffs.size(); i++) {
+      LOG(INFO) << i << " : (" << cutoffs[i] << ") = " << counts[i];
     }
 
-    ABSL_INTERNAL_LOG(
-        INFO, absl::StrCat("mean=", mean(), " stddev=", stddev(), "\n",   //
-                           " expected ", expected, "\n",                  //
-                           kChiSquared, " ", chi_square, " (", p, ")\n",  //
-                           kChiSquared, " @ 0.98 = ", threshold));
+    // clang-format off
+    LOG(INFO) << "mean=" << mean() << " stddev=" << stddev() << "\n"
+                 " expected " << expected << "\n"
+              << kChiSquared << " " << chi_square << " (" << p << ")\n"
+              << kChiSquared << " @ 0.98 = " << threshold;
+    // clang-format on
   }
   return p;
 }
diff --git a/abseil-cpp/absl/random/generators_test.cc b/abseil-cpp/absl/random/generators_test.cc
index 41725f1..2009130 100644
--- a/abseil-cpp/absl/random/generators_test.cc
+++ b/abseil-cpp/absl/random/generators_test.cc
@@ -49,7 +49,7 @@
   // (a, b) semantics, inferred types.
   absl::Uniform(absl::IntervalOpenOpen, *gen, 0, 1.0);  // Promoted to double
 
-  // Explict overriding of types.
+  // Explicit overriding of types.
   absl::Uniform<int>(*gen, 0, 100);
   absl::Uniform<int8_t>(*gen, 0, 100);
   absl::Uniform<int16_t>(*gen, 0, 100);
@@ -107,6 +107,8 @@
   absl::Poisson<int64_t>(*gen);
   absl::Poisson<uint64_t>(*gen);
   absl::Poisson<uint64_t>(URBG());
+  absl::Poisson<absl::int128>(*gen);
+  absl::Poisson<absl::uint128>(*gen);
 }
 
 template <typename URBG>
@@ -115,6 +117,7 @@
   absl::Bernoulli(*gen, 0.5);
 }
 
+
 template <typename URBG>
 void TestZipf(URBG* gen) {
   absl::Zipf<int>(*gen, 100);
@@ -126,6 +129,8 @@
   absl::Zipf<int64_t>(*gen, 1 << 10);
   absl::Zipf<uint64_t>(*gen, 1 << 10);
   absl::Zipf<uint64_t>(URBG(), 1 << 10);
+  absl::Zipf<absl::int128>(*gen, 1 << 10);
+  absl::Zipf<absl::uint128>(*gen, 1 << 10);
 }
 
 template <typename URBG>
@@ -146,6 +151,8 @@
   absl::LogUniform<int64_t>(*gen, 0, 1 << 10);
   absl::LogUniform<uint64_t>(*gen, 0, 1 << 10);
   absl::LogUniform<uint64_t>(URBG(), 0, 1 << 10);
+  absl::LogUniform<absl::int128>(*gen, 0, 1 << 10);
+  absl::LogUniform<absl::uint128>(*gen, 0, 1 << 10);
 }
 
 template <typename URBG>
diff --git a/abseil-cpp/absl/random/internal/BUILD.bazel b/abseil-cpp/absl/random/internal/BUILD.bazel
index 8485e28..37f4d6e 100644
--- a/abseil-cpp/absl/random/internal/BUILD.bazel
+++ b/abseil-cpp/absl/random/internal/BUILD.bazel
@@ -14,8 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
-
 # Internal-only implementation classes for Abseil Random
 load(
     "//absl:copts/configure_copts.bzl",
@@ -26,9 +24,11 @@
     "absl_random_randen_copts_init",
 )
 
-package(default_visibility = [
+default_package_visibility = [
     "//absl/random:__pkg__",
-])
+]
+
+package(default_visibility = default_package_visibility)
 
 licenses(["notice"])
 
@@ -37,7 +37,11 @@
     hdrs = ["traits.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = ["//absl/base:config"],
+    deps = [
+        "//absl/base:config",
+        "//absl/numeric:bits",
+        "//absl/numeric:int128",
+    ],
 )
 
 cc_library(
@@ -60,6 +64,7 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":traits",
         "//absl/base:config",
         "//absl/meta:type_traits",
     ],
@@ -75,12 +80,18 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS + select({
-        "//absl:windows": ["-DEFAULTLIB:bcrypt.lib"],
+        "//absl:msvc_compiler": ["-DEFAULTLIB:bcrypt.lib"],
+        "//absl:clang-cl_compiler": ["-DEFAULTLIB:bcrypt.lib"],
+        "//absl:mingw_compiler": [
+            "-DEFAULTLIB:bcrypt.lib",
+            "-lbcrypt",
+        ],
         "//conditions:default": [],
     }),
     deps = [
         ":fast_uniform_bits",
         "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
         "//absl/base:raw_logging_internal",
         "//absl/strings",
         "//absl/types:optional",
@@ -98,7 +109,8 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
@@ -124,7 +136,10 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = ["//absl/base:config"],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:endian",
+    ],
 )
 
 cc_library(
@@ -175,8 +190,8 @@
     deps = [
         ":fastmath",
         ":traits",
-        "//absl/base:bits",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
     ],
 )
 
@@ -187,7 +202,7 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = ["//absl/base:bits"],
+    deps = ["//absl/numeric:bits"],
 )
 
 cc_library(
@@ -197,8 +212,8 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":traits",
-        "//absl/base:bits",
         "//absl/base:config",
+        "//absl/numeric:bits",
         "//absl/numeric:int128",
     ],
 )
@@ -213,8 +228,8 @@
         ":salted_seed_seq",
         ":seed_material",
         "//absl/base:core_headers",
+        "//absl/container:inlined_vector",
         "//absl/meta:type_traits",
-        "//absl/types:optional",
         "//absl/types:span",
     ],
 )
@@ -229,6 +244,7 @@
         ":iostream_state_saver",
         "//absl/base:config",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/numeric:int128",
     ],
 )
@@ -238,9 +254,12 @@
     hdrs = ["randen_engine.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = default_package_visibility + [
+    ],
     deps = [
         ":iostream_state_saver",
         ":randen",
+        "//absl/base:endian",
         "//absl/meta:type_traits",
     ],
 )
@@ -289,6 +308,8 @@
         ":platform",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:endian",
+        "//absl/numeric:int128",
     ],
 )
 
@@ -320,7 +341,8 @@
         "randen_hwaes.h",
     ],
     copts = ABSL_DEFAULT_COPTS + ABSL_RANDOM_RANDEN_COPTS + select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//conditions:default": ["-Wno-pass-failed"],
     }),
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -328,6 +350,7 @@
         ":platform",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/numeric:int128",
     ],
 )
 
@@ -374,7 +397,7 @@
     "no_test_darwin_x86_64",
     "no_test_ios_x86_64",
     "no_test_loonix",
-    "no_test_msvc_x64",
+    "no_test_lexan",
     "no_test_wasm",
 ]
 
@@ -400,8 +423,8 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":generate_real",
-        "//absl/base:bits",
         "//absl/flags:flag",
+        "//absl/numeric:bits",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -487,6 +510,7 @@
 cc_library(
     name = "mock_helpers",
     hdrs = ["mock_helpers.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:fast_type_id",
         "//absl/types:optional",
@@ -497,6 +521,7 @@
     name = "mock_overload_set",
     testonly = 1,
     hdrs = ["mock_overload_set.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":mock_helpers",
         "//absl/random:mocking_bit_gen",
@@ -576,7 +601,7 @@
     deps = [
         ":explicit_seed_seq",
         ":randen_engine",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/strings",
         "//absl/time",
         "@com_google_googletest//:gtest_main",
@@ -605,6 +630,7 @@
     deps = [
         ":platform",
         ":randen_slow",
+        "//absl/base:endian",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -613,14 +639,14 @@
     name = "randen_hwaes_test",
     size = "small",
     srcs = ["randen_hwaes_test.cc"],
-    copts = ABSL_TEST_COPTS,
+    copts = ABSL_TEST_COPTS + ABSL_RANDOM_RANDEN_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = ABSL_RANDOM_NONPORTABLE_TAGS,
     deps = [
         ":platform",
         ":randen_hwaes",
         ":randen_hwaes_impl",  # build_cleaner: keep
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/strings:str_format",
         "@com_google_googletest//:gtest",
     ],
@@ -634,7 +660,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":wide_multiply",
-        "//absl/base:bits",
+        "//absl/numeric:bits",
         "//absl/numeric:int128",
         "@com_google_googletest//:gtest_main",
     ],
@@ -663,6 +689,7 @@
         ":traits",
         "//absl/base:config",
         "//absl/meta:type_traits",
+        "//absl/numeric:int128",
     ],
 )
 
@@ -676,11 +703,14 @@
         "benchmark",
         "no_test_ios_x86_64",
         "no_test_loonix",  # Crashing.
+        "no_test_wasm",
     ],
     deps = [
         ":nanobenchmark",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/log:check",
         "//absl/strings",
+        "//absl/strings:str_format",
     ],
 )
 
@@ -708,7 +738,6 @@
 
 cc_test(
     name = "iostream_state_saver_test",
-    size = "small",
     srcs = ["iostream_state_saver_test.cc"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
diff --git a/abseil-cpp/absl/random/internal/chi_square.cc b/abseil-cpp/absl/random/internal/chi_square.cc
index 640d48c..fbe0173 100644
--- a/abseil-cpp/absl/random/internal/chi_square.cc
+++ b/abseil-cpp/absl/random/internal/chi_square.cc
@@ -125,7 +125,8 @@
     const double variance = 2.0 / (9 * dof);
     // Cannot use this method if the variance is 0.
     if (variance != 0) {
-      return std::pow(z * std::sqrt(variance) + mean, 3.0) * dof;
+      double term = z * std::sqrt(variance) + mean;
+      return dof * (term * term * term);
     }
   }
 
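For context on the hunk above: this branch approximates the chi-square quantile with a Wilson-Hilferty-style transform, and the change replaces std::pow(term, 3.0) with an explicit cube. A hedged sketch of the whole computation (the mean term is recalled from the standard formula and is not shown in this hunk):

    #include <cmath>

    // Sketch of the Wilson-Hilferty-style approximation; the cube is written
    // as repeated multiplication because std::pow(x, 3.0) is slower and can be
    // slightly less exact than three multiplies.
    double ChiSquareValueApprox(double dof, double z) {
      const double mean = 1.0 - 2.0 / (9.0 * dof);  // assumed; not in the hunk
      const double variance = 2.0 / (9.0 * dof);
      const double term = z * std::sqrt(variance) + mean;
      return dof * (term * term * term);
    }
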
diff --git a/abseil-cpp/absl/random/internal/distribution_caller.h b/abseil-cpp/absl/random/internal/distribution_caller.h
index fc81b78..0f162a4 100644
--- a/abseil-cpp/absl/random/internal/distribution_caller.h
+++ b/abseil-cpp/absl/random/internal/distribution_caller.h
@@ -18,6 +18,7 @@
 #define ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_
 
 #include <utility>
+#include <type_traits>
 
 #include "absl/base/config.h"
 #include "absl/base/internal/fast_type_id.h"
@@ -32,6 +33,8 @@
 // to intercept such calls.
 template <typename URBG>
 struct DistributionCaller {
+  static_assert(!std::is_pointer<URBG>::value,
+                "You must pass a reference, not a pointer.");
   // SFINAE to detect whether the URBG type includes a member matching
   // bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
   //
diff --git a/abseil-cpp/absl/random/internal/distribution_test_util.cc b/abseil-cpp/absl/random/internal/distribution_test_util.cc
index e900565..9fa37bd 100644
--- a/abseil-cpp/absl/random/internal/distribution_test_util.cc
+++ b/abseil-cpp/absl/random/internal/distribution_test_util.cc
@@ -213,7 +213,7 @@
   double result = 1.;
   int ns = static_cast<int>(q + xc * psq);
 
-  // Use the soper reduction forumla.
+  // Use the soper reduction formula.
   double rx = (ns == 0) ? x : x / xc;
   double temp = q - ai;
   for (;;) {
@@ -236,7 +236,7 @@
     }
   }
 
-  // NOTE: See also TOMS Alogrithm 708.
+  // NOTE: See also TOMS Algorithm 708.
   // http://www.netlib.org/toms/index.html
   //
   // NOTE: The NWSC library also includes BRATIO / ISUBX (p87)
@@ -247,7 +247,7 @@
 // https://www.jstor.org/stable/2346798?read-now=1&seq=4#page_scan_tab_contents
 // https://www.jstor.org/stable/2346887?seq=1#page_scan_tab_contents
 //
-// XINBTA(p, q, beta, alhpa)
+// XINBTA(p, q, beta, alpha)
 //  p:     the value of the parameter p.
 //  q:     the value of the parameter q.
 //  beta:  the value of ln B(p, q)
diff --git a/abseil-cpp/absl/random/internal/explicit_seed_seq.h b/abseil-cpp/absl/random/internal/explicit_seed_seq.h
index 6a743ea..25f7915 100644
--- a/abseil-cpp/absl/random/internal/explicit_seed_seq.h
+++ b/abseil-cpp/absl/random/internal/explicit_seed_seq.h
@@ -23,6 +23,7 @@
 #include <vector>
 
 #include "absl/base/config.h"
+#include "absl/base/internal/endian.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
diff --git a/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc b/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc
index a55ad73..e36d5fa 100644
--- a/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc
+++ b/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc
@@ -24,6 +24,8 @@
 
 namespace {
 
+using ::absl::random_internal::ExplicitSeedSeq;
+
 template <typename Sseq>
 bool ConformsToInterface() {
   // Check that the SeedSequence can be default-constructed.
@@ -64,14 +66,14 @@
   EXPECT_TRUE(ConformsToInterface<std::seed_seq>());
 
   // Abseil classes
-  EXPECT_TRUE(ConformsToInterface<absl::random_internal::ExplicitSeedSeq>());
+  EXPECT_TRUE(ConformsToInterface<ExplicitSeedSeq>());
 }
 
 TEST(ExplicitSeedSeq, DefaultConstructorGeneratesZeros) {
   const size_t kNumBlocks = 128;
 
   uint32_t outputs[kNumBlocks];
-  absl::random_internal::ExplicitSeedSeq seq;
+  ExplicitSeedSeq seq;
   seq.generate(outputs, &outputs[kNumBlocks]);
 
   for (uint32_t& seed : outputs) {
@@ -87,8 +89,7 @@
   for (uint32_t& seed : seed_material) {
     seed = urandom();
   }
-  absl::random_internal::ExplicitSeedSeq seq(seed_material,
-                                             &seed_material[kNumBlocks]);
+  ExplicitSeedSeq seq(seed_material, &seed_material[kNumBlocks]);
 
   // Check that output is same as seed-material provided to constructor.
   {
@@ -133,17 +134,14 @@
   for (uint32_t& entry : entropy) {
     entry = urandom();
   }
-  absl::random_internal::ExplicitSeedSeq seq_from_entropy(std::begin(entropy),
-                                                          std::end(entropy));
+  ExplicitSeedSeq seq_from_entropy(std::begin(entropy), std::end(entropy));
   // Copy constructor.
   {
-    absl::random_internal::ExplicitSeedSeq seq_copy(seq_from_entropy);
+    ExplicitSeedSeq seq_copy(seq_from_entropy);
     EXPECT_EQ(seq_copy.size(), seq_from_entropy.size());
 
-    std::vector<uint32_t> seeds_1;
-    seeds_1.resize(1000, 0);
-    std::vector<uint32_t> seeds_2;
-    seeds_2.resize(1000, 1);
+    std::vector<uint32_t> seeds_1(1000, 0);
+    std::vector<uint32_t> seeds_2(1000, 1);
 
     seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
     seq_copy.generate(seeds_2.begin(), seeds_2.end());
@@ -155,13 +153,10 @@
     for (uint32_t& entry : entropy) {
       entry = urandom();
     }
-    absl::random_internal::ExplicitSeedSeq another_seq(std::begin(entropy),
-                                                       std::end(entropy));
+    ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy));
 
-    std::vector<uint32_t> seeds_1;
-    seeds_1.resize(1000, 0);
-    std::vector<uint32_t> seeds_2;
-    seeds_2.resize(1000, 0);
+    std::vector<uint32_t> seeds_1(1000, 0);
+    std::vector<uint32_t> seeds_2(1000, 0);
 
     seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
     another_seq.generate(seeds_2.begin(), seeds_2.end());
@@ -170,7 +165,15 @@
     EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2)));
 
     // Apply the assignment-operator.
+    // GCC 12 has a false-positive -Wstringop-overflow warning here.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstringop-overflow"
+#endif
     another_seq = seq_from_entropy;
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
 
     // Re-generate seeds.
     seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
@@ -182,15 +185,13 @@
   // Move constructor.
   {
     // Get seeds from seed-sequence constructed from entropy.
-    std::vector<uint32_t> seeds_1;
-    seeds_1.resize(1000, 0);
+    std::vector<uint32_t> seeds_1(1000, 0);
     seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
 
     // Apply move-constructor move the sequence to another instance.
     absl::random_internal::ExplicitSeedSeq moved_seq(
         std::move(seq_from_entropy));
-    std::vector<uint32_t> seeds_2;
-    seeds_2.resize(1000, 1);
+    std::vector<uint32_t> seeds_2(1000, 1);
     moved_seq.generate(seeds_2.begin(), seeds_2.end());
     // Verify that seeds produced by moved-instance are the same as original.
     EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
@@ -202,3 +203,35 @@
     EXPECT_THAT(seeds_1, Each(Eq(0)));
   }
 }
+
+TEST(ExplicitSeedSeq, StdURBGGoldenTests) {
+  // Verify that for std::- URBG instances the results are stable across
+  // platforms (these should have deterministic output).
+  {
+    ExplicitSeedSeq seed_sequence{12, 34, 56};
+    std::minstd_rand rng(seed_sequence);
+
+    std::minstd_rand::result_type values[4] = {rng(), rng(), rng(), rng()};
+    EXPECT_THAT(values,
+                testing::ElementsAre(579252, 43785881, 464353103, 1501811174));
+  }
+
+  {
+    ExplicitSeedSeq seed_sequence{12, 34, 56};
+    std::mt19937 rng(seed_sequence);
+
+    std::mt19937::result_type values[4] = {rng(), rng(), rng(), rng()};
+    EXPECT_THAT(values, testing::ElementsAre(138416803, 151130212, 33817739,
+                                             138416803));
+  }
+
+  {
+    ExplicitSeedSeq seed_sequence{12, 34, 56};
+    std::mt19937_64 rng(seed_sequence);
+
+    std::mt19937_64::result_type values[4] = {rng(), rng(), rng(), rng()};
+    EXPECT_THAT(values,
+                testing::ElementsAre(19738651785169348, 1464811352364190456,
+                                     18054685302720800, 19738651785169348));
+  }
+}
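The golden test added above pins down the property that makes ExplicitSeedSeq useful: its 32-bit inputs reach the engine essentially verbatim (modulo endian normalization), so the resulting stream is identical on every platform. A small illustrative sketch of that use, not part of the patch:

    #include <random>

    #include "absl/random/internal/explicit_seed_seq.h"

    // Seeding a standard engine from ExplicitSeedSeq is reproducible across
    // platforms, which is exactly what the golden values above check.
    std::mt19937_64 MakeDeterministicEngine() {
      absl::random_internal::ExplicitSeedSeq seq{12, 34, 56};
      return std::mt19937_64(seq);
    }
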
diff --git a/abseil-cpp/absl/random/internal/fast_uniform_bits.h b/abseil-cpp/absl/random/internal/fast_uniform_bits.h
index 425aaf7..83ee5c0 100644
--- a/abseil-cpp/absl/random/internal/fast_uniform_bits.h
+++ b/abseil-cpp/absl/random/internal/fast_uniform_bits.h
@@ -22,6 +22,7 @@
 
 #include "absl/base/config.h"
 #include "absl/meta/type_traits.h"
+#include "absl/random/internal/traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -56,9 +57,10 @@
 // `PowerOfTwoVariate(urbg)`.
 template <typename URBG>
 constexpr size_t NumBits() {
-  return RangeSize<URBG>() == 0
-             ? std::numeric_limits<typename URBG::result_type>::digits
-             : IntegerLog2(RangeSize<URBG>());
+  return static_cast<size_t>(
+      RangeSize<URBG>() == 0
+          ? std::numeric_limits<typename URBG::result_type>::digits
+          : IntegerLog2(RangeSize<URBG>()));
 }
 
 // Given a shift value `n`, constructs a mask with exactly the low `n` bits set.
@@ -98,7 +100,7 @@
   result_type operator()(URBG& g);  // NOLINT(runtime/references)
 
  private:
-  static_assert(std::is_unsigned<UIntType>::value,
+  static_assert(IsUnsigned<UIntType>::value,
                 "Class-template FastUniformBits<> must be parameterized using "
                 "an unsigned type.");
 
@@ -150,7 +152,8 @@
 
   result_type r = static_cast<result_type>(g() - kMin);
   for (size_t n = 1; n < kIters; ++n) {
-    r = (r << kShift) + static_cast<result_type>(g() - kMin);
+    r = static_cast<result_type>(r << kShift) +
+        static_cast<result_type>(g() - kMin);
   }
   return r;
 }
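A usage sketch for the changes above (illustrative only): FastUniformBits widens any URBG to a full-width unsigned result, which is why NumBits() and the shift accumulation in the hunk matter.

    #include <cstdint>
    #include <random>

    #include "absl/random/internal/fast_uniform_bits.h"

    // FastUniformBits calls the underlying generator as many times as
    // NumBits() requires and packs the results into a full 64-bit value.
    uint64_t FullWidth64(std::minstd_rand& rng) {
      absl::random_internal::FastUniformBits<uint64_t> bits;
      return bits(rng);
    }
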
diff --git a/abseil-cpp/absl/random/internal/fast_uniform_bits_test.cc b/abseil-cpp/absl/random/internal/fast_uniform_bits_test.cc
index cee702d..34c2520 100644
--- a/abseil-cpp/absl/random/internal/fast_uniform_bits_test.cc
+++ b/abseil-cpp/absl/random/internal/fast_uniform_bits_test.cc
@@ -167,7 +167,7 @@
              FakeUrbg<uint64_t, 0, (std::numeric_limits<uint64_t>::max)()>>()));
 }
 
-// The constants need to be choosen so that an infinite rejection loop doesn't
+// The constants need to be chosen so that an infinite rejection loop doesn't
 // happen...
 using Urng1_5bit = FakeUrbg<uint8_t, 0, 2, 0>;  // ~1.5 bits (range 3)
 using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>;
diff --git a/abseil-cpp/absl/random/internal/fastmath.h b/abseil-cpp/absl/random/internal/fastmath.h
index 6baeb5a..963b769 100644
--- a/abseil-cpp/absl/random/internal/fastmath.h
+++ b/abseil-cpp/absl/random/internal/fastmath.h
@@ -22,27 +22,22 @@
 #include <cmath>
 #include <cstdint>
 
-#include "absl/base/internal/bits.h"
+#include "absl/numeric/bits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-// Returns the position of the first bit set.
-inline int LeadingSetBit(uint64_t n) {
-  return 64 - base_internal::CountLeadingZeros64(n);
-}
-
 // Compute log2(n) using integer operations.
 // While std::log2 is more accurate than std::log(n) / std::log(2), for
 // very large numbers--those close to std::numeric_limits<uint64_t>::max() - 2,
 // for instance--std::log2 rounds up rather than down, which introduces
 // definite skew in the results.
 inline int IntLog2Floor(uint64_t n) {
-  return (n <= 1) ? 0 : (63 - base_internal::CountLeadingZeros64(n));
+  return (n <= 1) ? 0 : (63 - countl_zero(n));
 }
 inline int IntLog2Ceil(uint64_t n) {
-  return (n <= 1) ? 0 : (64 - base_internal::CountLeadingZeros64(n - 1));
+  return (n <= 1) ? 0 : (64 - countl_zero(n - 1));
 }
 
 inline double StirlingLogFactorial(double n) {
@@ -55,18 +50,6 @@
          (1.0 / 360.0) * ninv * ninv * ninv;
 }
 
-// Rotate value right.
-//
-// We only implement the uint32_t / uint64_t versions because
-// 1) those are the only ones we use, and
-// 2) those are the only ones where clang detects the rotate idiom correctly.
-inline constexpr uint32_t rotr(uint32_t value, uint8_t bits) {
-  return (value >> (bits & 31)) | (value << ((-bits) & 31));
-}
-inline constexpr uint64_t rotr(uint64_t value, uint8_t bits) {
-  return (value >> (bits & 63)) | (value << ((-bits) & 63));
-}
-
 }  // namespace random_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
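A worked example for the countl_zero-based forms above, illustrative only. Take n = 40 (binary 101000, highest set bit is bit 5): absl::countl_zero(uint64_t{40}) == 58, so IntLog2Floor(40) == 63 - 58 == 5; absl::countl_zero(uint64_t{39}) == 58, so IntLog2Ceil(40) == 64 - 58 == 6.

    #include <cstdint>

    #include "absl/numeric/bits.h"

    // Same expressions as IntLog2Floor/IntLog2Ceil above, evaluated for n = 40.
    int ExampleLog2Floor() { return 63 - absl::countl_zero(uint64_t{40}); }      // 5
    int ExampleLog2Ceil() { return 64 - absl::countl_zero(uint64_t{40} - 1); }   // 6
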
diff --git a/abseil-cpp/absl/random/internal/fastmath_test.cc b/abseil-cpp/absl/random/internal/fastmath_test.cc
index 65859c2..0d6f9dc 100644
--- a/abseil-cpp/absl/random/internal/fastmath_test.cc
+++ b/abseil-cpp/absl/random/internal/fastmath_test.cc
@@ -27,19 +27,6 @@
 
 namespace {
 
-TEST(DistributionImplTest, LeadingSetBit) {
-  using absl::random_internal::LeadingSetBit;
-  constexpr uint64_t kZero = 0;
-  EXPECT_EQ(0, LeadingSetBit(kZero));
-  EXPECT_EQ(64, LeadingSetBit(~kZero));
-
-  for (int index = 0; index < 64; index++) {
-    uint64_t x = static_cast<uint64_t>(1) << index;
-    EXPECT_EQ(index + 1, LeadingSetBit(x)) << index;
-    EXPECT_EQ(index + 1, LeadingSetBit(x + x - 1)) << index;
-  }
-}
-
 TEST(FastMathTest, IntLog2FloorTest) {
   using absl::random_internal::IntLog2Floor;
   constexpr uint64_t kZero = 0;
diff --git a/abseil-cpp/absl/random/internal/generate_real.h b/abseil-cpp/absl/random/internal/generate_real.h
index 20f6d20..9a6f400 100644
--- a/abseil-cpp/absl/random/internal/generate_real.h
+++ b/abseil-cpp/absl/random/internal/generate_real.h
@@ -23,8 +23,8 @@
 #include <limits>
 #include <type_traits>
 
-#include "absl/base/internal/bits.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
 #include "absl/random/internal/fastmath.h"
 #include "absl/random/internal/traits.h"
 
@@ -50,10 +50,10 @@
 // inputs, otherwise it never returns 0.
 //
 // When a value in U(0,1) is required, use:
-//   Uniform64ToReal<double, PositiveValueT, true>;
+//   GenerateRealFromBits<double, PositiveValueT, true>;
 //
 // When a value in U(-1,1) is required, use:
-//   Uniform64ToReal<double, SignedValueT, false>;
+//   GenerateRealFromBits<double, SignedValueT, false>;
 //
 //   This generates more distinct values than the mathematical equivalent
 //   `U(0, 1) * 2.0 - 1.0`.
@@ -78,7 +78,7 @@
       "GenerateRealFromBits must be parameterized by either float or double.");
 
   static_assert(sizeof(uint_type) == sizeof(real_type),
-                "Mismatched unsinged and real types.");
+                "Mismatched unsigned and real types.");
 
   static_assert((std::numeric_limits<real_type>::is_iec559 &&
                  std::numeric_limits<real_type>::radix == 2),
@@ -120,17 +120,15 @@
 
   // Number of leading zeros is mapped to the exponent: 2^-clz
   // bits is 0..01xxxxxx. After shifting, we're left with 1xxx...0..0
-  int clz = base_internal::CountLeadingZeros64(bits);
+  int clz = countl_zero(bits);
   bits <<= (IncludeZero ? clz : (clz & 63));  // remove 0-bits.
   exp -= clz;                                 // set the exponent.
   bits >>= (63 - kExp);
 
   // Construct the 32-bit or 64-bit IEEE 754 floating-point value from
   // the individual fields: sign, exp, mantissa(bits).
-  uint_type val =
-      (std::is_same<SignedTag, GeneratePositiveTag>::value ? 0u : sign) |
-      (static_cast<uint_type>(exp) << kExp) |
-      (static_cast<uint_type>(bits) & kMask);
+  uint_type val = sign | (static_cast<uint_type>(exp) << kExp) |
+                  (static_cast<uint_type>(bits) & kMask);
 
   // bit_cast to the output-type
   real_type result;
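A usage sketch for this header, mirroring the corrected comment above: GenerateRealFromBits<double, GeneratePositiveTag, true> maps a raw 64-bit variate into U(0, 1) (the signed-tag variant for U(-1, 1) is used the same way). Illustrative only; the tag name matches the one exercised in the test below.

    #include <cstdint>

    #include "absl/random/internal/generate_real.h"

    using absl::random_internal::GeneratePositiveTag;
    using absl::random_internal::GenerateRealFromBits;

    // With IncludeZero = true a zero input maps to 0.0; otherwise the result
    // is strictly positive and less than 1.
    double UnitInterval(uint64_t bits) {
      return GenerateRealFromBits<double, GeneratePositiveTag, true>(bits);
    }
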
diff --git a/abseil-cpp/absl/random/internal/generate_real_test.cc b/abseil-cpp/absl/random/internal/generate_real_test.cc
index aa02f0c..b099dbf 100644
--- a/abseil-cpp/absl/random/internal/generate_real_test.cc
+++ b/abseil-cpp/absl/random/internal/generate_real_test.cc
@@ -20,8 +20,8 @@
 #include <string>
 
 #include "gtest/gtest.h"
-#include "absl/base/internal/bits.h"
 #include "absl/flags/flag.h"
+#include "absl/numeric/bits.h"
 
 ABSL_FLAG(int64_t, absl_random_test_trials, 50000,
           "Number of trials for the probability tests.");
@@ -413,14 +413,13 @@
 }
 
 TEST(GenerateRealTest, ExhaustiveFloat) {
-  using absl::base_internal::CountLeadingZeros64;
   auto ToFloat = [](uint64_t a) {
     return GenerateRealFromBits<float, GeneratePositiveTag, true>(a);
   };
 
   // Rely on RandU64ToFloat generating values from greatest to least when
-  // supplied with uint64_t values from greatest (0xfff...) to least (0x0).  Thus,
-  // this algorithm stores the previous value, and if the new value is at
+  // supplied with uint64_t values from greatest (0xfff...) to least (0x0).
+  // Thus, this algorithm stores the previous value, and if the new value is at
   // greater than or equal to the previous value, then there is a collision in
   // the generation algorithm.
   //
@@ -464,7 +463,7 @@
 
     // Adjust decrement and check value based on how many leading 0
     // bits are set in the current value.
-    const int clz = CountLeadingZeros64(x);
+    const int clz = absl::countl_zero(x);
     if (clz < kDig) {
       dec <<= (kDig - clz);
       chk = (~uint64_t(0)) >> (clz + 1);
diff --git a/abseil-cpp/absl/random/internal/iostream_state_saver_test.cc b/abseil-cpp/absl/random/internal/iostream_state_saver_test.cc
index 7bb8ad9..ea9d2af 100644
--- a/abseil-cpp/absl/random/internal/iostream_state_saver_test.cc
+++ b/abseil-cpp/absl/random/internal/iostream_state_saver_test.cc
@@ -14,6 +14,9 @@
 
 #include "absl/random/internal/iostream_state_saver.h"
 
+#include <errno.h>
+#include <stdio.h>
+
 #include <sstream>
 #include <string>
 
@@ -272,7 +275,6 @@
   }
 }
 
-#if !defined(__EMSCRIPTEN__)
 TEST(IOStreamStateSaver, RoundTripLongDoubles) {
   // Technically, C++ only guarantees that long double is at least as large as a
   // double.  Practically it varies from 64-bits to 128-bits.
@@ -343,14 +345,14 @@
     }
 
     // Avoid undefined behavior (overflow/underflow).
-    if (dd <= std::numeric_limits<int64_t>::max() &&
-        dd >= std::numeric_limits<int64_t>::lowest()) {
+    if (dd <= static_cast<long double>(std::numeric_limits<int64_t>::max()) &&
+        dd >=
+            static_cast<long double>(std::numeric_limits<int64_t>::lowest())) {
       int64_t x = static_cast<int64_t>(dd);
       EXPECT_EQ(x, StreamRoundTrip<int64_t>(x));
     }
   }
 }
-#endif  // !defined(__EMSCRIPTEN__)
 
 TEST(StrToDTest, DoubleMin) {
   const char kV[] = "2.22507385850720138e-308";
diff --git a/abseil-cpp/absl/random/internal/mock_helpers.h b/abseil-cpp/absl/random/internal/mock_helpers.h
index 9af27ab..a7a97bf 100644
--- a/abseil-cpp/absl/random/internal/mock_helpers.h
+++ b/abseil-cpp/absl/random/internal/mock_helpers.h
@@ -18,6 +18,7 @@
 
 #include <tuple>
 #include <type_traits>
+#include <utility>
 
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/types/optional.h"
@@ -80,6 +81,13 @@
   }
 
  public:
+  // InvokeMock is private; this provides access for some specialized use cases.
+  template <typename URBG>
+  static inline bool PrivateInvokeMock(URBG* urbg, IdType type,
+                                       void* args_tuple, void* result) {
+    return urbg->InvokeMock(type, args_tuple, result);
+  }
+
   // Invoke a mock for the KeyT (may or may not be a signature).
   //
   // KeyT is used to generate a typeid-based lookup key for the mock.
@@ -93,7 +101,7 @@
   template <typename KeyT, typename URBG, typename... Args>
   static auto MaybeInvokeMock(URBG* urbg, Args&&... args)
       -> absl::optional<typename KeySignature<KeyT>::result_type> {
-    // Use function overloading to dispatch to the implemenation since
+    // Use function overloading to dispatch to the implementation since
     // more modern patterns (e.g. require + constexpr) are not supported in all
     // compiler configurations.
     return InvokeMockImpl<KeyT, typename KeySignature<KeyT>::result_type,
@@ -109,14 +117,14 @@
   // The mocked function signature will be composed from KeyT as:
   //   result_type(args...)
   template <typename KeyT, typename MockURBG>
-  static auto MockFor(MockURBG& m) -> decltype(
-      std::declval<MockURBG>()
-          .template RegisterMock<typename KeySignature<KeyT>::result_type,
-                                 typename KeySignature<KeyT>::arg_tuple_type>(
-              std::declval<IdType>())) {
+  static auto MockFor(MockURBG& m)
+      -> decltype(m.template RegisterMock<
+                  typename KeySignature<KeyT>::result_type,
+                  typename KeySignature<KeyT>::arg_tuple_type>(
+          m, std::declval<IdType>())) {
     return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
                                    typename KeySignature<KeyT>::arg_tuple_type>(
-        ::absl::base_internal::FastTypeId<KeyT>());
+        m, ::absl::base_internal::FastTypeId<KeyT>());
   }
 };
 
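MockHelpers::MockFor and PrivateInvokeMock above are internal plumbing; user code reaches them through absl::MockingBitGen together with the distribution mock matchers. An illustrative sketch of that public path, not part of the patch:

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"
    #include "absl/random/distributions.h"
    #include "absl/random/mock_distributions.h"
    #include "absl/random/mocking_bit_gen.h"

    // Injecting a fixed value into absl::Uniform via the mocking bit generator.
    TEST(MockedRandomExample, ReturnsInjectedValue) {
      absl::MockingBitGen gen;
      EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
          .WillOnce(::testing::Return(42));
      EXPECT_EQ(absl::Uniform<int>(gen, 1, 100), 42);
    }
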
diff --git a/abseil-cpp/absl/random/internal/mock_overload_set.h b/abseil-cpp/absl/random/internal/mock_overload_set.h
index dccc6ce..0d9c6c1 100644
--- a/abseil-cpp/absl/random/internal/mock_overload_set.h
+++ b/abseil-cpp/absl/random/internal/mock_overload_set.h
@@ -19,7 +19,6 @@
 #include <type_traits>
 
 #include "gmock/gmock.h"
-#include "gtest/gtest.h"
 #include "absl/random/internal/mock_helpers.h"
 #include "absl/random/mocking_bit_gen.h"
 
@@ -45,10 +44,12 @@
                 "Overload signature must have return type matching the "
                 "distribution result_type.");
   using KeyT = Ret(DistrT, std::tuple<Args...>);
-  auto gmock_Call(
-      absl::MockingBitGen& gen,  // NOLINT(google-runtime-references)
-      const ::testing::Matcher<Args>&... matchers)
+
+  template <typename MockURBG>
+  auto gmock_Call(MockURBG& gen, const ::testing::Matcher<Args>&... matchers)
       -> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...)) {
+    static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
+                  "Mocking requires an absl::MockingBitGen");
     return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...);
   }
 };
@@ -59,12 +60,14 @@
                 "Overload signature must have return type matching the "
                 "distribution result_type.");
   using KeyT = Ret(DistrT, std::tuple<Arg, Args...>);
-  auto gmock_Call(
-      const ::testing::Matcher<Arg>& matcher,
-      absl::MockingBitGen& gen,  // NOLINT(google-runtime-references)
-      const ::testing::Matcher<Args>&... matchers)
+
+  template <typename MockURBG>
+  auto gmock_Call(const ::testing::Matcher<Arg>& matcher, MockURBG& gen,
+                  const ::testing::Matcher<Args>&... matchers)
       -> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher,
                                                              matchers...)) {
+    static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
+                  "Mocking requires an absl::MockingBitGen");
     return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher, matchers...);
   }
 };
diff --git a/abseil-cpp/absl/random/internal/nanobenchmark.cc b/abseil-cpp/absl/random/internal/nanobenchmark.cc
index c918181..0f31a7d 100644
--- a/abseil-cpp/absl/random/internal/nanobenchmark.cc
+++ b/abseil-cpp/absl/random/internal/nanobenchmark.cc
@@ -361,7 +361,7 @@
   // Write that many copies of each unique value to the array.
   T* ABSL_RANDOM_INTERNAL_RESTRICT p = values;
   for (const auto& value_count : unique) {
-    std::fill(p, p + value_count.second, value_count.first);
+    std::fill_n(p, value_count.second, value_count.first);
     p += value_count.second;
   }
   ABSL_RAW_CHECK(p == values + num_values, "Did not produce enough output");
diff --git a/abseil-cpp/absl/random/internal/nanobenchmark_test.cc b/abseil-cpp/absl/random/internal/nanobenchmark_test.cc
index f1571e2..d4f1028 100644
--- a/abseil-cpp/absl/random/internal/nanobenchmark_test.cc
+++ b/abseil-cpp/absl/random/internal/nanobenchmark_test.cc
@@ -14,8 +14,10 @@
 
 #include "absl/random/internal/nanobenchmark.h"
 
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
 #include "absl/strings/numbers.h"
+#include "absl/strings/str_format.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -36,16 +38,16 @@
   params.max_evals = 6;  // avoid test timeout
   const size_t num_results = Measure(&Div, nullptr, inputs, N, results, params);
   if (num_results == 0) {
-    ABSL_RAW_LOG(
-        WARNING,
-        "WARNING: Measurement failed, should not happen when using "
-        "PinThreadToCPU unless the region to measure takes > 1 second.\n");
+    LOG(WARNING)
+        << "WARNING: Measurement failed, should not happen when using "
+           "PinThreadToCPU unless the region to measure takes > 1 second.";
     return;
   }
   for (size_t i = 0; i < num_results; ++i) {
-    ABSL_RAW_LOG(INFO, "%5zu: %6.2f ticks; MAD=%4.2f%%\n", results[i].input,
-                 results[i].ticks, results[i].variability * 100.0);
-    ABSL_RAW_CHECK(results[i].ticks != 0.0f, "Zero duration");
+    LOG(INFO) << absl::StreamFormat("%5u: %6.2f ticks; MAD=%4.2f%%\n",
+                                    results[i].input, results[i].ticks,
+                                    results[i].variability * 100.0);
+    CHECK_NE(results[i].ticks, 0.0f) << "Zero duration";
   }
 }
 
@@ -54,7 +56,7 @@
   int cpu = -1;
   if (argc == 2) {
     if (!absl::SimpleAtoi(argv[1], &cpu)) {
-      ABSL_RAW_LOG(FATAL, "The optional argument must be a CPU number >= 0.\n");
+      LOG(FATAL) << "The optional argument must be a CPU number >= 0.";
     }
   }
   PinThreadToCPU(cpu);
diff --git a/abseil-cpp/absl/random/internal/nonsecure_base.h b/abseil-cpp/absl/random/internal/nonsecure_base.h
index 730fa2e..c3b8033 100644
--- a/abseil-cpp/absl/random/internal/nonsecure_base.h
+++ b/abseil-cpp/absl/random/internal/nonsecure_base.h
@@ -17,28 +17,82 @@
 
 #include <algorithm>
 #include <cstdint>
-#include <iostream>
 #include <iterator>
-#include <random>
-#include <string>
 #include <type_traits>
+#include <utility>
 #include <vector>
 
 #include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
 #include "absl/meta/type_traits.h"
 #include "absl/random/internal/pool_urbg.h"
 #include "absl/random/internal/salted_seed_seq.h"
 #include "absl/random/internal/seed_material.h"
-#include "absl/types/optional.h"
 #include "absl/types/span.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
+// RandenPoolSeedSeq is a custom seed sequence type where generate() fills the
+// provided buffer via the RandenPool entropy source.
+class RandenPoolSeedSeq {
+ private:
+  struct ContiguousTag {};
+  struct BufferTag {};
+
+  // Generate random unsigned values directly into the buffer.
+  template <typename Contiguous>
+  void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
+    const size_t n = static_cast<size_t>(std::distance(begin, end));
+    auto* a = &(*begin);
+    RandenPool<uint8_t>::Fill(
+        absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
+  }
+
+  // Construct a buffer of size n and fill it with values, then copy
+  // those values into the seed iterators.
+  template <typename RandomAccessIterator>
+  void generate_impl(BufferTag, RandomAccessIterator begin,
+                     RandomAccessIterator end) {
+    const size_t n = std::distance(begin, end);
+    absl::InlinedVector<uint32_t, 8> data(n, 0);
+    RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
+    std::copy(std::begin(data), std::end(data), begin);
+  }
+
+ public:
+  using result_type = uint32_t;
+
+  size_t size() { return 0; }
+
+  template <typename OutIterator>
+  void param(OutIterator) const {}
+
+  template <typename RandomAccessIterator>
+  void generate(RandomAccessIterator begin, RandomAccessIterator end) {
+    // RandomAccessIterator must be assignable from uint32_t
+    if (begin != end) {
+      using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
+      // ContiguousTag indicates the common case of a known contiguous buffer,
+      // which allows directly filling the buffer. In C++20,
+      // std::contiguous_iterator_tag provides a mechanism for testing this
+      // capability, however until Abseil's support requirements allow us to
+      // assume C++20, limit checks to a few common cases.
+      using TagType = absl::conditional_t<
+          (std::is_pointer<RandomAccessIterator>::value ||
+           std::is_same<RandomAccessIterator,
+                        typename std::vector<U>::iterator>::value),
+          ContiguousTag, BufferTag>;
+
+      generate_impl(TagType{}, begin, end);
+    }
+  }
+};
+
 // Each instance of NonsecureURBGBase<URBG> will be seeded by variates produced
 // by a thread-unique URBG-instance.
-template <typename URBG>
+template <typename URBG, typename Seeder = RandenPoolSeedSeq>
 class NonsecureURBGBase {
  public:
   using result_type = typename URBG::result_type;
@@ -85,49 +139,6 @@
   }
 
  private:
-  // Seeder is a custom seed sequence type where generate() fills the provided
-  // buffer via the RandenPool entropy source.
-  struct Seeder {
-    using result_type = uint32_t;
-
-    size_t size() { return 0; }
-
-    template <typename OutIterator>
-    void param(OutIterator) const {}
-
-    template <typename RandomAccessIterator>
-    void generate(RandomAccessIterator begin, RandomAccessIterator end) {
-      if (begin != end) {
-        // begin, end must be random access iterators assignable from uint32_t.
-        generate_impl(
-            std::integral_constant<bool, sizeof(*begin) == sizeof(uint32_t)>{},
-            begin, end);
-      }
-    }
-
-    // Commonly, generate is invoked with a pointer to a buffer which
-    // can be cast to a uint32_t.
-    template <typename RandomAccessIterator>
-    void generate_impl(std::integral_constant<bool, true>,
-                       RandomAccessIterator begin, RandomAccessIterator end) {
-      auto buffer = absl::MakeSpan(begin, end);
-      auto target = absl::MakeSpan(reinterpret_cast<uint32_t*>(buffer.data()),
-                                   buffer.size());
-      RandenPool<uint32_t>::Fill(target);
-    }
-
-    // The non-uint32_t case should be uncommon, and involves an extra copy,
-    // filling the uint32_t buffer and then mixing into the output.
-    template <typename RandomAccessIterator>
-    void generate_impl(std::integral_constant<bool, false>,
-                       RandomAccessIterator begin, RandomAccessIterator end) {
-      const size_t n = std::distance(begin, end);
-      absl::InlinedVector<uint32_t, 8> data(n, 0);
-      RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
-      std::copy(std::begin(data), std::end(data), begin);
-    }
-  };
-
   static URBG ConstructURBG() {
     Seeder seeder;
     return URBG(seeder);
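RandenPoolSeedSeq, now hoisted out of NonsecureURBGBase above, models a seed sequence, so in principle it can seed a standard engine directly, the same way ConstructURBG seeds the wrapped URBG. A hedged sketch, illustrative only:

    #include <random>

    #include "absl/random/internal/nonsecure_base.h"

    // Seeds a std engine from the shared Randen pool; the engine's internal
    // seed buffer is filled through RandenPoolSeedSeq::generate().
    std::mt19937 MakePoolSeededEngine() {
      absl::random_internal::RandenPoolSeedSeq seq;
      return std::mt19937(seq);
    }
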
diff --git a/abseil-cpp/absl/random/internal/nonsecure_base_test.cc b/abseil-cpp/absl/random/internal/nonsecure_base_test.cc
index 698027f..3502243 100644
--- a/abseil-cpp/absl/random/internal/nonsecure_base_test.cc
+++ b/abseil-cpp/absl/random/internal/nonsecure_base_test.cc
@@ -15,6 +15,7 @@
 #include "absl/random/internal/nonsecure_base.h"
 
 #include <algorithm>
+#include <cstdint>
 #include <iostream>
 #include <memory>
 #include <random>
@@ -192,54 +193,35 @@
   }
 }
 
-// This is a PRNG-compatible type specifically designed to test
-// that NonsecureURBGBase::Seeder can correctly handle iterators
-// to arbitrary non-uint32_t size types.
-template <typename T>
-struct SeederTestEngine {
-  using result_type = T;
+TEST(RandenPoolSeedSeqTest, SeederWorksForU32) {
+  absl::random_internal::RandenPoolSeedSeq seeder;
 
-  static constexpr result_type(min)() {
-    return (std::numeric_limits<result_type>::min)();
-  }
-  static constexpr result_type(max)() {
-    return (std::numeric_limits<result_type>::max)();
-  }
-
-  template <class SeedSequence,
-            typename = typename absl::enable_if_t<
-                !std::is_same<SeedSequence, SeederTestEngine>::value>>
-  explicit SeederTestEngine(SeedSequence&& seq) {
-    seed(seq);
-  }
-
-  SeederTestEngine(const SeederTestEngine&) = default;
-  SeederTestEngine& operator=(const SeederTestEngine&) = default;
-  SeederTestEngine(SeederTestEngine&&) = default;
-  SeederTestEngine& operator=(SeederTestEngine&&) = default;
-
-  result_type operator()() { return state[0]; }
-
-  template <class SeedSequence>
-  void seed(SeedSequence&& seq) {
-    std::fill(std::begin(state), std::end(state), T(0));
-    seq.generate(std::begin(state), std::end(state));
-  }
-
-  T state[2];
-};
-
-TEST(NonsecureURBGBase, SeederWorksForU32) {
-  using U32 =
-      absl::random_internal::NonsecureURBGBase<SeederTestEngine<uint32_t>>;
-  U32 x;
-  EXPECT_NE(0, x());
+  uint32_t state[2] = {0, 0};
+  seeder.generate(std::begin(state), std::end(state));
+  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
 }
 
-TEST(NonsecureURBGBase, SeederWorksForU64) {
-  using U64 =
-      absl::random_internal::NonsecureURBGBase<SeederTestEngine<uint64_t>>;
+TEST(RandenPoolSeedSeqTest, SeederWorksForU64) {
+  absl::random_internal::RandenPoolSeedSeq seeder;
 
-  U64 x;
-  EXPECT_NE(0, x());
+  uint64_t state[2] = {0, 0};
+  seeder.generate(std::begin(state), std::end(state));
+  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
+  EXPECT_FALSE((state[0] >> 32) == 0 && (state[1] >> 32) == 0);
+}
+
+TEST(RandenPoolSeedSeqTest, SeederWorksForS32) {
+  absl::random_internal::RandenPoolSeedSeq seeder;
+
+  int32_t state[2] = {0, 0};
+  seeder.generate(std::begin(state), std::end(state));
+  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
+}
+
+TEST(RandenPoolSeedSeqTest, SeederWorksForVector) {
+  absl::random_internal::RandenPoolSeedSeq seeder;
+
+  std::vector<uint32_t> state(2);
+  seeder.generate(std::begin(state), std::end(state));
+  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
 }
diff --git a/abseil-cpp/absl/random/internal/pcg_engine.h b/abseil-cpp/absl/random/internal/pcg_engine.h
index 53c23fe..e1f4ef3 100644
--- a/abseil-cpp/absl/random/internal/pcg_engine.h
+++ b/abseil-cpp/absl/random/internal/pcg_engine.h
@@ -19,6 +19,7 @@
 
 #include "absl/base/config.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
 #include "absl/random/internal/fastmath.h"
 #include "absl/random/internal/iostream_state_saver.h"
@@ -220,48 +221,27 @@
 template <uint64_t kMultA, uint64_t kMultB, uint64_t kIncA, uint64_t kIncB>
 class pcg128_params {
  public:
-#if ABSL_HAVE_INTRINSIC_INT128
-  using state_type = __uint128_t;
-  static inline constexpr state_type make_u128(uint64_t a, uint64_t b) {
-    return (static_cast<__uint128_t>(a) << 64) | b;
-  }
-#else
   using state_type = absl::uint128;
-  static inline constexpr state_type make_u128(uint64_t a, uint64_t b) {
-    return absl::MakeUint128(a, b);
-  }
-#endif
-
   static inline constexpr state_type multiplier() {
-    return make_u128(kMultA, kMultB);
+    return absl::MakeUint128(kMultA, kMultB);
   }
   static inline constexpr state_type increment() {
-    return make_u128(kIncA, kIncB);
+    return absl::MakeUint128(kIncA, kIncB);
   }
 };
 
 // Implementation of the PCG xsl_rr_128_64 128-bit mixing function, which
 // accepts an input of state_type and mixes it into an output of result_type.
 struct pcg_xsl_rr_128_64 {
-#if ABSL_HAVE_INTRINSIC_INT128
-  using state_type = __uint128_t;
-#else
   using state_type = absl::uint128;
-#endif
   using result_type = uint64_t;
 
   inline uint64_t operator()(state_type state) {
     // This is equivalent to the xsl_rr_128_64 mixing function.
-#if ABSL_HAVE_INTRINSIC_INT128
     uint64_t rotate = static_cast<uint64_t>(state >> 122u);
     state ^= state >> 64;
     uint64_t s = static_cast<uint64_t>(state);
-#else
-    uint64_t h = Uint128High64(state);
-    uint64_t rotate = h >> 58u;
-    uint64_t s = Uint128Low64(state) ^ h;
-#endif
-    return random_internal::rotr(s, rotate);
+    return rotr(s, static_cast<int>(rotate));
   }
 };
 
@@ -281,8 +261,8 @@
   using state_type = uint64_t;
   using result_type = uint32_t;
   inline uint32_t operator()(uint64_t state) {
-    return random_internal::rotr(
-        static_cast<uint32_t>(((state >> 18) ^ state) >> 27), state >> 59);
+    return rotr(static_cast<uint32_t>(((state >> 18) ^ state) >> 27),
+                state >> 59);
   }
 };
 
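The hunk above rewrites the xsl_rr_128_64 mixer against absl::uint128 only, using the public rotr. A standalone sketch of the same mix written with those public APIs (illustrative; XslRr128To64 is a made-up name):

    #include <cstdint>

    #include "absl/numeric/bits.h"
    #include "absl/numeric/int128.h"

    uint64_t XslRr128To64(absl::uint128 state) {
      const int rotate =
          static_cast<int>(static_cast<uint64_t>(state >> 122));  // top 6 bits
      state ^= state >> 64;                              // fold the high half down
      const uint64_t s = static_cast<uint64_t>(state);   // keep the low 64 bits
      return absl::rotr(s, rotate);
    }
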
diff --git a/abseil-cpp/absl/random/internal/platform.h b/abseil-cpp/absl/random/internal/platform.h
index bbdb4e6..d779f48 100644
--- a/abseil-cpp/absl/random/internal/platform.h
+++ b/abseil-cpp/absl/random/internal/platform.h
@@ -131,7 +131,7 @@
 
 // ABSL_RANDOM_INTERNAL_AES_DISPATCH indicates whether the currently active
 // platform has, or should use run-time dispatch for selecting the
-// acclerated Randen implementation.
+// accelerated Randen implementation.
 #define ABSL_RANDOM_INTERNAL_AES_DISPATCH 0
 
 #if defined(ABSL_ARCH_X86_64)
diff --git a/abseil-cpp/absl/random/internal/pool_urbg.cc b/abseil-cpp/absl/random/internal/pool_urbg.cc
index 5bee530..5aefa7d 100644
--- a/abseil-cpp/absl/random/internal/pool_urbg.cc
+++ b/abseil-cpp/absl/random/internal/pool_urbg.cc
@@ -131,7 +131,7 @@
 }
 
 // Number of pooled urbg entries.
-static constexpr int kPoolSize = 8;
+static constexpr size_t kPoolSize = 8;
 
 // Shared pool entries.
 static absl::once_flag pool_once;
@@ -147,15 +147,15 @@
 // on subsequent runs the order within the same program may be significantly
 // different. However, as other thread IDs are not assigned sequentially,
 // this is not expected to matter.
-int GetPoolID() {
+size_t GetPoolID() {
   static_assert(kPoolSize >= 1,
                 "At least one urbg instance is required for PoolURBG");
 
-  ABSL_CONST_INIT static std::atomic<int64_t> sequence{0};
+  ABSL_CONST_INIT static std::atomic<uint64_t> sequence{0};
 
 #ifdef ABSL_HAVE_THREAD_LOCAL
-  static thread_local int my_pool_id = -1;
-  if (ABSL_PREDICT_FALSE(my_pool_id < 0)) {
+  static thread_local size_t my_pool_id = kPoolSize;
+  if (ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) {
     my_pool_id = (sequence++ % kPoolSize);
   }
   return my_pool_id;
@@ -171,8 +171,8 @@
 
   // Store the value in the pthread_{get/set}specific. However an uninitialized
   // value is 0, so add +1 to distinguish from the null value.
-  intptr_t my_pool_id =
-      reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  uintptr_t my_pool_id =
+      reinterpret_cast<uintptr_t>(pthread_getspecific(tid_key));
   if (ABSL_PREDICT_FALSE(my_pool_id == 0)) {
     // No allocated ID, allocate the next value, cache it, and return.
     my_pool_id = (sequence++ % kPoolSize) + 1;
@@ -194,11 +194,10 @@
   // Not all the platforms that we build for have std::aligned_alloc, however
   // since we never free these objects, we can over allocate and munge the
   // pointers to the correct alignment.
-  void* memory = std::malloc(sizeof(RandenPoolEntry) + kAlignment);
-  auto x = reinterpret_cast<intptr_t>(memory);
+  uintptr_t x = reinterpret_cast<uintptr_t>(
+      new char[sizeof(RandenPoolEntry) + kAlignment]);
   auto y = x % kAlignment;
-  void* aligned =
-      (y == 0) ? memory : reinterpret_cast<void*>(x + kAlignment - y);
+  void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
   return new (aligned) RandenPoolEntry();
 }
 
@@ -216,7 +215,7 @@
           absl::MakeSpan(seed_material))) {
     random_internal::ThrowSeedGenException();
   }
-  for (int i = 0; i < kPoolSize; i++) {
+  for (size_t i = 0; i < kPoolSize; i++) {
     shared_pools[i] = PoolAlignedAlloc();
     shared_pools[i]->Init(
         absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
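
PoolAlignedAlloc above intentionally leaks a buffer that is kAlignment bytes larger than needed and rounds the pointer up by hand, because extended heap alignment is not available on every target. A sketch of that rounding step, assuming a power-of-two alignment (AlignUp is an illustrative name, not an Abseil function):

#include <cstddef>
#include <cstdint>

// Sketch: round `raw` up to the next multiple of `alignment` (a power of
// two). The caller must over-allocate by at least `alignment - 1` bytes.
inline void* AlignUp(void* raw, std::size_t alignment) {
  const auto addr = reinterpret_cast<std::uintptr_t>(raw);
  return reinterpret_cast<void*>((addr + alignment - 1) & ~(alignment - 1));
}

The patch itself uses a modulo test rather than the mask, which is equivalent for power-of-two alignments; the entry is then constructed with placement new at the aligned address and never freed.
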
diff --git a/abseil-cpp/absl/random/internal/randen.cc b/abseil-cpp/absl/random/internal/randen.cc
index 78a1e00..c1bc044 100644
--- a/abseil-cpp/absl/random/internal/randen.cc
+++ b/abseil-cpp/absl/random/internal/randen.cc
@@ -17,7 +17,7 @@
 #include "absl/base/internal/raw_logging.h"
 #include "absl/random/internal/randen_detect.h"
 
-// RANDen = RANDom generator or beetroots in Swiss German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
 // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
 // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
 //
diff --git a/abseil-cpp/absl/random/internal/randen.h b/abseil-cpp/absl/random/internal/randen.h
index c2834aa..9ff4a7a 100644
--- a/abseil-cpp/absl/random/internal/randen.h
+++ b/abseil-cpp/absl/random/internal/randen.h
@@ -26,7 +26,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-// RANDen = RANDom generator or beetroots in Swiss German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
 // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
 // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
 //
@@ -43,10 +43,8 @@
 
   // Generate updates the randen sponge. The outer portion of the sponge
   // (kCapacityBytes .. kStateBytes) may be consumed as PRNG state.
-  template <typename T, size_t N>
-  void Generate(T (&state)[N]) const {
-    static_assert(N * sizeof(T) == kStateBytes,
-                  "Randen::Generate() requires kStateBytes of state");
+  // REQUIRES: state points to kStateBytes of state.
+  inline void Generate(void* state) const {
 #if ABSL_RANDOM_INTERNAL_AES_DISPATCH
     // HW AES Dispatch.
     if (has_crypto_) {
@@ -65,13 +63,9 @@
 
   // Absorb incorporates additional seed material into the randen sponge.  After
   // absorb returns, Generate must be called before the state may be consumed.
-  template <typename S, size_t M, typename T, size_t N>
-  void Absorb(const S (&seed)[M], T (&state)[N]) const {
-    static_assert(M * sizeof(S) == RandenTraits::kSeedBytes,
-                  "Randen::Absorb() requires kSeedBytes of seed");
-
-    static_assert(N * sizeof(T) == RandenTraits::kStateBytes,
-                  "Randen::Absorb() requires kStateBytes of state");
+  // REQUIRES: seed points to kSeedBytes of seed.
+  // REQUIRES: state points to kStateBytes of state.
+  inline void Absorb(const void* seed, void* state) const {
 #if ABSL_RANDOM_INTERNAL_AES_DISPATCH
     // HW AES Dispatch.
     if (has_crypto_) {
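
With the templated array-reference overloads removed, Generate and Absorb take raw pointers and the size and alignment contract lives only in the REQUIRES comments above. A hedged usage sketch (SketchDriveSponge is an illustrative name; the alignas(16) mirrors what the hardware AES path expects of these buffers):

#include "absl/random/internal/randen.h"
#include "absl/random/internal/randen_traits.h"

// Sketch: when driving the sponge directly, the caller owns both sizing and
// alignment of the seed and state buffers.
void SketchDriveSponge() {
  using absl::random_internal::Randen;
  using absl::random_internal::RandenTraits;
  alignas(16) unsigned char seed[RandenTraits::kSeedBytes] = {};
  alignas(16) unsigned char state[RandenTraits::kStateBytes] = {};
  Randen r;
  r.Absorb(seed, state);  // mix seed material into the sponge
  r.Generate(state);      // refill the consumable part of the state
}
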
diff --git a/abseil-cpp/absl/random/internal/randen_benchmarks.cc b/abseil-cpp/absl/random/internal/randen_benchmarks.cc
index f589172..ec086ce 100644
--- a/abseil-cpp/absl/random/internal/randen_benchmarks.cc
+++ b/abseil-cpp/absl/random/internal/randen_benchmarks.cc
@@ -47,8 +47,10 @@
 // Randen implementation benchmarks.
 template <typename T>
 struct AbsorbFn : public T {
-  mutable uint64_t state[kStateSizeT] = {};
-  mutable uint32_t seed[kSeedSizeT] = {};
+  // These are both cast to uint128* in the RandenHwAes implementation, so
+  // ensure they are 16 byte aligned.
+  alignas(16) mutable uint64_t state[kStateSizeT] = {};
+  alignas(16) mutable uint32_t seed[kSeedSizeT] = {};
 
   static constexpr size_t bytes() { return sizeof(seed); }
 
diff --git a/abseil-cpp/absl/random/internal/randen_detect.cc b/abseil-cpp/absl/random/internal/randen_detect.cc
index d63230c..bdeab87 100644
--- a/abseil-cpp/absl/random/internal/randen_detect.cc
+++ b/abseil-cpp/absl/random/internal/randen_detect.cc
@@ -1,13 +1,13 @@
 // Copyright 2017 The Abseil Authors.
 //
-// Licensed under the Apache License, Version 2.0 (the"License");
+// Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //      https://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an"AS IS" BASIS,
+// distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
@@ -24,6 +24,11 @@
 
 #include "absl/random/internal/platform.h"
 
+#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
+    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
+#define ABSL_HAVE_GETAUXVAL
+#endif
+
 #if defined(ABSL_ARCH_X86_64)
 #define ABSL_INTERNAL_USE_X86_CPUID
 #elif defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
@@ -31,7 +36,7 @@
 #if defined(__ANDROID__)
 #define ABSL_INTERNAL_USE_ANDROID_GETAUXVAL
 #define ABSL_INTERNAL_USE_GETAUXVAL
-#elif defined(__linux__)
+#elif defined(__linux__) && defined(ABSL_HAVE_GETAUXVAL)
 #define ABSL_INTERNAL_USE_LINUX_GETAUXVAL
 #define ABSL_INTERNAL_USE_GETAUXVAL
 #endif
@@ -40,7 +45,10 @@
 #if defined(ABSL_INTERNAL_USE_X86_CPUID)
 #if defined(_WIN32) || defined(_WIN64)
 #include <intrin.h>  // NOLINT(build/include_order)
-#pragma intrinsic(__cpuid)
+#elif ABSL_HAVE_BUILTIN(__cpuid)
+// MSVC-equivalent __cpuid intrinsic declaration for clang-like compilers
+// for non-Windows build environments.
+extern void __cpuid(int[4], int);
 #else
 // MSVC-equivalent __cpuid intrinsic function.
 static void __cpuid(int cpu_info[4], int info_type) {
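
The randen_detect.cc changes only adjust which __cpuid declaration or getauxval path gets compiled; the x86 capability test itself still comes down to CPUID leaf 1. A self-contained sketch for GCC/Clang on x86 (SketchCpuHasAesni is an illustrative name, not the function this file provides):

#include <cpuid.h>  // GCC/Clang wrapper around the CPUID instruction

// Sketch: AES-NI support is reported in CPUID.01H:ECX, bit 25.
static bool SketchCpuHasAesni() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) == 0) return false;
  return (ecx & (1u << 25)) != 0;
}
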
diff --git a/abseil-cpp/absl/random/internal/randen_engine.h b/abseil-cpp/absl/random/internal/randen_engine.h
index 6b33731..fe2d9f6 100644
--- a/abseil-cpp/absl/random/internal/randen_engine.h
+++ b/abseil-cpp/absl/random/internal/randen_engine.h
@@ -23,6 +23,7 @@
 #include <limits>
 #include <type_traits>
 
+#include "absl/base/internal/endian.h"
 #include "absl/meta/type_traits.h"
 #include "absl/random/internal/iostream_state_saver.h"
 #include "absl/random/internal/randen.h"
@@ -41,7 +42,7 @@
 // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
 // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
 template <typename T>
-class alignas(16) randen_engine {
+class alignas(8) randen_engine {
  public:
   // C++11 URBG interface:
   using result_type = T;
@@ -57,7 +58,8 @@
     return (std::numeric_limits<result_type>::max)();
   }
 
-  explicit randen_engine(result_type seed_value = 0) { seed(seed_value); }
+  randen_engine() : randen_engine(0) {}
+  explicit randen_engine(result_type seed_value) { seed(seed_value); }
 
   template <class SeedSequence,
             typename = typename absl::enable_if_t<
@@ -66,17 +68,27 @@
     seed(seq);
   }
 
-  randen_engine(const randen_engine&) = default;
+  // alignment requirements dictate custom copy and move constructors.
+  randen_engine(const randen_engine& other)
+      : next_(other.next_), impl_(other.impl_) {
+    std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
+  }
+  randen_engine& operator=(const randen_engine& other) {
+    next_ = other.next_;
+    impl_ = other.impl_;
+    std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
+    return *this;
+  }
 
   // Returns random bits from the buffer in units of result_type.
   result_type operator()() {
     // Refill the buffer if needed (unlikely).
+    auto* begin = state();
     if (next_ >= kStateSizeT) {
       next_ = kCapacityT;
-      impl_.Generate(state_);
+      impl_.Generate(begin);
     }
-
-    return state_[next_++];
+    return little_endian::ToHost(begin[next_++]);
   }
 
   template <class SeedSequence>
@@ -91,9 +103,10 @@
   void seed(result_type seed_value = 0) {
     next_ = kStateSizeT;
     // Zeroes the inner state and fills the outer state with seed_value to
-    // mimics behaviour of reseed
-    std::fill(std::begin(state_), std::begin(state_) + kCapacityT, 0);
-    std::fill(std::begin(state_) + kCapacityT, std::end(state_), seed_value);
+    // mimic the behaviour of reseed
+    auto* begin = state();
+    std::fill(begin, begin + kCapacityT, 0);
+    std::fill(begin + kCapacityT, begin + kStateSizeT, seed_value);
   }
 
   // Inserts entropy into (part of) the state. Calling this periodically with
@@ -104,7 +117,6 @@
     using sequence_result_type = typename SeedSequence::result_type;
     static_assert(sizeof(sequence_result_type) == 4,
                   "SeedSequence::result_type must be 32-bit");
-
     constexpr size_t kBufferSize =
         Randen::kSeedBytes / sizeof(sequence_result_type);
     alignas(16) sequence_result_type buffer[kBufferSize];
@@ -118,12 +130,19 @@
     if (entropy_size < kBufferSize) {
       // ... and only request that many values, or 256-bits, when unspecified.
       const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size;
-      std::fill(std::begin(buffer) + requested_entropy, std::end(buffer), 0);
-      seq.generate(std::begin(buffer), std::begin(buffer) + requested_entropy);
+      std::fill(buffer + requested_entropy, buffer + kBufferSize, 0);
+      seq.generate(buffer, buffer + requested_entropy);
+#ifdef ABSL_IS_BIG_ENDIAN
+      // Randen expects the seed buffer to be in Little Endian; reverse it on
+      // Big Endian platforms.
+      for (sequence_result_type& e : buffer) {
+        e = absl::little_endian::FromHost(e);
+      }
+#endif
       // The Randen paper suggests preferentially initializing even-numbered
       // 128-bit vectors of the randen state (there are 16 such vectors).
       // The seed data is merged into the state offset by 128-bits, which
-      // implies prefering seed bytes [16..31, ..., 208..223]. Since the
+      // implies preferring seed bytes [16..31, ..., 208..223]. Since the
       // buffer is 32-bit values, we swap the corresponding buffer positions in
       // 128-bit chunks.
       size_t dst = kBufferSize;
@@ -138,9 +157,9 @@
         std::swap(buffer[--dst], buffer[--src]);
       }
     } else {
-      seq.generate(std::begin(buffer), std::end(buffer));
+      seq.generate(buffer, buffer + kBufferSize);
     }
-    impl_.Absorb(buffer, state_);
+    impl_.Absorb(buffer, state());
 
     // Generate will be called when operator() is called
     next_ = kStateSizeT;
@@ -151,9 +170,10 @@
     count -= step;
 
     constexpr uint64_t kRateT = kStateSizeT - kCapacityT;
+    auto* begin = state();
     while (count > 0) {
       next_ = kCapacityT;
-      impl_.Generate(state_);
+      impl_.Generate(*reinterpret_cast<result_type(*)[kStateSizeT]>(begin));
       step = std::min<uint64_t>(kRateT, count);
       count -= step;
     }
@@ -161,9 +181,9 @@
   }
 
   bool operator==(const randen_engine& other) const {
+    const auto* begin = state();
     return next_ == other.next_ &&
-           std::equal(std::begin(state_), std::end(state_),
-                      std::begin(other.state_));
+           std::equal(begin, begin + kStateSizeT, other.state());
   }
 
   bool operator!=(const randen_engine& other) const {
@@ -177,11 +197,13 @@
     using numeric_type =
         typename random_internal::stream_format_type<result_type>::type;
     auto saver = random_internal::make_ostream_state_saver(os);
-    for (const auto& elem : engine.state_) {
+    auto* it = engine.state();
+    for (auto* end = it + kStateSizeT; it < end; ++it) {
       // In the case that `elem` is `uint8_t`, it must be cast to something
       // larger so that it prints as an integer rather than a character. For
       // simplicity, apply the cast all circumstances.
-      os << static_cast<numeric_type>(elem) << os.fill();
+      os << static_cast<numeric_type>(little_endian::FromHost(*it))
+         << os.fill();
     }
     os << engine.next_;
     return os;
@@ -200,13 +222,13 @@
       // necessary to read a wider type and then cast it to uint8_t.
       numeric_type value;
       is >> value;
-      elem = static_cast<result_type>(value);
+      elem = little_endian::ToHost(static_cast<result_type>(value));
     }
     is >> next;
     if (is.fail()) {
       return is;
     }
-    std::memcpy(engine.state_, state, sizeof(engine.state_));
+    std::memcpy(engine.state(), state, sizeof(state));
     engine.next_ = next;
     return is;
   }
@@ -217,9 +239,21 @@
   static constexpr size_t kCapacityT =
       Randen::kCapacityBytes / sizeof(result_type);
 
-  // First kCapacityT are `inner', the others are accessible random bits.
-  alignas(16) result_type state_[kStateSizeT];
-  size_t next_;  // index within state_
+  // Returns the state array pointer, which is aligned to 16 bytes.
+  // The first kCapacityT are the `inner' sponge; the remainder are available.
+  result_type* state() {
+    return reinterpret_cast<result_type*>(
+        (reinterpret_cast<uintptr_t>(&raw_state_) & 0xf) ? (raw_state_ + 8)
+                                                         : raw_state_);
+  }
+  const result_type* state() const {
+    return const_cast<randen_engine*>(this)->state();
+  }
+
+  // raw state array, manually aligned in state(). This overallocates
+  // by 8 bytes since C++ does not guarantee extended heap alignment.
+  alignas(8) char raw_state_[Randen::kStateBytes + 8];
+  size_t next_;  // index within state()
   Randen impl_;
 };
 
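
The engine's class alignment drops from 16 to 8, and the 16-byte alignment the AES kernels need is instead produced inside state(): an 8-byte-aligned buffer sits at an address that is 0 or 8 mod 16, so a single conditional +8 always suffices, which is why raw_state_ over-allocates by exactly 8 bytes. A sketch of just that adjustment (Align16 is an illustrative name):

#include <cstdint>

// Sketch: given storage whose address is a multiple of 8, bump it by 8
// bytes whenever it is not already a multiple of 16.
inline char* Align16(char* raw8) {
  const auto addr = reinterpret_cast<std::uintptr_t>(raw8);
  return (addr & 0xf) ? raw8 + 8 : raw8;
}

This is also why the hunk adds custom copy operations: two engines may end up with different offsets into their raw_state_ arrays, so the state has to be copied through state() rather than by copying the raw bytes at fixed offsets.
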
diff --git a/abseil-cpp/absl/random/internal/randen_engine_test.cc b/abseil-cpp/absl/random/internal/randen_engine_test.cc
index c8e7685..a94f491 100644
--- a/abseil-cpp/absl/random/internal/randen_engine_test.cc
+++ b/abseil-cpp/absl/random/internal/randen_engine_test.cc
@@ -21,7 +21,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/explicit_seed_seq.h"
 #include "absl/strings/str_cat.h"
 #include "absl/time/clock.h"
@@ -645,9 +645,8 @@
   }
   auto duration = absl::GetCurrentTimeNanos() - start;
 
-  ABSL_INTERNAL_LOG(INFO, absl::StrCat(static_cast<double>(duration) /
-                                           static_cast<double>(kCount),
-                                       "ns"));
+  LOG(INFO) << static_cast<double>(duration) / static_cast<double>(kCount)
+            << "ns";
 
   EXPECT_GT(sum, 0);
   EXPECT_GE(duration, kCount);  // Should be slower than 1ns per call.
diff --git a/abseil-cpp/absl/random/internal/randen_hwaes.cc b/abseil-cpp/absl/random/internal/randen_hwaes.cc
index b5a3f90..f535f4c 100644
--- a/abseil-cpp/absl/random/internal/randen_hwaes.cc
+++ b/abseil-cpp/absl/random/internal/randen_hwaes.cc
@@ -23,49 +23,20 @@
 #include <cstring>
 
 #include "absl/base/attributes.h"
+#include "absl/numeric/int128.h"
 #include "absl/random/internal/platform.h"
 #include "absl/random/internal/randen_traits.h"
 
 // ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
 // a hardware accelerated implementation of randen, or whether it
 // will contain stubs that exit the process.
-#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
-// The platform.h directives are sufficient to indicate whether
-// we should build accelerated implementations for x86.
-#if (ABSL_HAVE_ACCELERATED_AES || ABSL_RANDOM_INTERNAL_AES_DISPATCH)
-#define ABSL_RANDEN_HWAES_IMPL 1
-#endif
-#elif defined(ABSL_ARCH_PPC)
-// The platform.h directives are sufficient to indicate whether
-// we should build accelerated implementations for PPC.
-//
-// NOTE: This has mostly been tested on 64-bit Power variants,
-// and not embedded cpus such as powerpc32-8540
 #if ABSL_HAVE_ACCELERATED_AES
+// The following platforms have implemented RandenHwAes.
+#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32) || \
+    defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) ||       \
+    defined(ABSL_ARCH_AARCH64)
 #define ABSL_RANDEN_HWAES_IMPL 1
 #endif
-#elif defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64)
-// ARM is somewhat more complicated. We might support crypto natively...
-#if ABSL_HAVE_ACCELERATED_AES || \
-    (defined(__ARM_NEON) && defined(__ARM_FEATURE_CRYPTO))
-#define ABSL_RANDEN_HWAES_IMPL 1
-
-#elif ABSL_RANDOM_INTERNAL_AES_DISPATCH && !defined(__APPLE__) && \
-    (defined(__GNUC__) && __GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ > 9)
-// ...or, on GCC, we can use an ASM directive to
-// instruct the assember to allow crypto instructions.
-#define ABSL_RANDEN_HWAES_IMPL 1
-#define ABSL_RANDEN_HWAES_IMPL_CRYPTO_DIRECTIVE 1
-#endif
-#else
-// HWAES is unsupported by these architectures / platforms:
-//   __myriad2__
-//   __mips__
-//
-// Other architectures / platforms are unknown.
-//
-// See the Abseil documentation on supported macros at:
-// https://abseil.io/docs/cpp/platforms/macros
 #endif
 
 #if !defined(ABSL_RANDEN_HWAES_IMPL)
@@ -120,11 +91,6 @@
 
 using absl::random_internal::RandenTraits;
 
-// Randen operates on 128-bit vectors.
-struct alignas(16) u64x2 {
-  uint64_t data[2];
-};
-
 }  // namespace
 
 // TARGET_CRYPTO defines a crypto attribute for each architecture.
@@ -186,7 +152,7 @@
 }
 
 // Enables native loads in the round loop by pre-swapping.
-inline ABSL_TARGET_CRYPTO void SwapEndian(u64x2* state) {
+inline ABSL_TARGET_CRYPTO void SwapEndian(absl::uint128* state) {
   for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
     Vector128Store(ReverseBytes(Vector128Load(state + block)), state + block);
   }
@@ -196,22 +162,6 @@
 
 #elif defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64)
 
-// This asm directive will cause the file to be compiled with crypto extensions
-// whether or not the cpu-architecture supports it.
-#if ABSL_RANDEN_HWAES_IMPL_CRYPTO_DIRECTIVE
-asm(".arch_extension  crypto\n");
-
-// Override missing defines.
-#if !defined(__ARM_NEON)
-#define __ARM_NEON 1
-#endif
-
-#if !defined(__ARM_FEATURE_CRYPTO)
-#define __ARM_FEATURE_CRYPTO 1
-#endif
-
-#endif
-
 // Rely on the ARM NEON+Crypto advanced simd types, defined in <arm_neon.h>.
 // uint8x16_t is the user alias for underlying __simd128_uint8_t type.
 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0073a/IHI0073A_arm_neon_intrinsics_ref.pdf
@@ -261,7 +211,7 @@
 
 #elif defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
 // On x86 we rely on the aesni instructions
-#include <wmmintrin.h>
+#include <immintrin.h>
 
 namespace {
 
@@ -270,7 +220,7 @@
 class Vector128 {
  public:
   // Convert from/to intrinsics.
-  inline explicit Vector128(const __m128i& Vector128) : data_(Vector128) {}
+  inline explicit Vector128(const __m128i& v) : data_(v) {}
 
   inline __m128i data() const { return data_; }
 
@@ -327,7 +277,7 @@
 
 // Block shuffles applies a shuffle to the entire state between AES rounds.
 // Improved odd-even shuffle from "New criterion for diffusion property".
-inline ABSL_TARGET_CRYPTO void BlockShuffle(u64x2* state) {
+inline ABSL_TARGET_CRYPTO void BlockShuffle(absl::uint128* state) {
   static_assert(RandenTraits::kFeistelBlocks == 16,
                 "Expecting 16 FeistelBlocks.");
 
@@ -374,8 +324,9 @@
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_TARGET_CRYPTO const u64x2* FeistelRound(
-    u64x2* state, const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+inline ABSL_TARGET_CRYPTO const absl::uint128* FeistelRound(
+    absl::uint128* state,
+    const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   static_assert(RandenTraits::kFeistelBlocks == 16,
                 "Expecting 16 FeistelBlocks.");
 
@@ -436,7 +387,8 @@
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
 inline ABSL_TARGET_CRYPTO void Permute(
-    u64x2* state, const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+    absl::uint128* state,
+    const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   // (Successfully unrolled; the first iteration jumps into the second half)
 #ifdef __clang__
 #pragma clang loop unroll_count(2)
@@ -473,10 +425,11 @@
   static_assert(RandenTraits::kStateBytes / sizeof(Vector128) == 16,
                 "Unexpected Randen kStateBlocks");
 
-  auto* state =
-      reinterpret_cast<u64x2 * ABSL_RANDOM_INTERNAL_RESTRICT>(state_void);
+  auto* state = reinterpret_cast<absl::uint128 * ABSL_RANDOM_INTERNAL_RESTRICT>(
+      state_void);
   const auto* seed =
-      reinterpret_cast<const u64x2 * ABSL_RANDOM_INTERNAL_RESTRICT>(seed_void);
+      reinterpret_cast<const absl::uint128 * ABSL_RANDOM_INTERNAL_RESTRICT>(
+          seed_void);
 
   Vector128 b1 = Vector128Load(state + 1);
   b1 ^= Vector128Load(seed + 0);
@@ -545,8 +498,8 @@
   static_assert(RandenTraits::kCapacityBytes == sizeof(Vector128),
                 "Capacity mismatch");
 
-  auto* state = reinterpret_cast<u64x2*>(state_void);
-  const auto* keys = reinterpret_cast<const u64x2*>(keys_void);
+  auto* state = reinterpret_cast<absl::uint128*>(state_void);
+  const auto* keys = reinterpret_cast<const absl::uint128*>(keys_void);
 
   const Vector128 prev_inner = Vector128Load(state);
 
diff --git a/abseil-cpp/absl/random/internal/randen_hwaes.h b/abseil-cpp/absl/random/internal/randen_hwaes.h
index bce36b5..71a7f69 100644
--- a/abseil-cpp/absl/random/internal/randen_hwaes.h
+++ b/abseil-cpp/absl/random/internal/randen_hwaes.h
@@ -26,7 +26,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-// RANDen = RANDom generator or beetroots in Swiss German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
 // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
 // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
 //
diff --git a/abseil-cpp/absl/random/internal/randen_hwaes_test.cc b/abseil-cpp/absl/random/internal/randen_hwaes_test.cc
index 66ddb43..00d96ef 100644
--- a/abseil-cpp/absl/random/internal/randen_hwaes_test.cc
+++ b/abseil-cpp/absl/random/internal/randen_hwaes_test.cc
@@ -16,7 +16,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/platform.h"
 #include "absl/random/internal/randen_detect.h"
 #include "absl/random/internal/randen_traits.h"
@@ -27,44 +27,39 @@
 using absl::random_internal::RandenHwAes;
 using absl::random_internal::RandenTraits;
 
-// Local state parameters.
-constexpr size_t kSeedBytes =
-    RandenTraits::kStateBytes - RandenTraits::kCapacityBytes;
-constexpr size_t kStateSizeT = RandenTraits::kStateBytes / sizeof(uint64_t);
-constexpr size_t kSeedSizeT = kSeedBytes / sizeof(uint32_t);
-
-struct alignas(16) randen {
-  uint64_t state[kStateSizeT];
-  uint32_t seed[kSeedSizeT];
-};
-
 TEST(RandenHwAesTest, Default) {
   EXPECT_TRUE(absl::random_internal::CPUSupportsRandenHwAes());
 
-  constexpr uint64_t kGolden[] = {
-      0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977,
-      0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912,
-      0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6,
-      0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a,
-      0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37,
-      0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556,
-      0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42,
-      0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc,
-      0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f,
-      0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3,
-      0x026ff374c101da7e, 0x811ef0821c3de851,
+  constexpr uint8_t kGolden[] = {
+      0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
+      0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
+      0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
+      0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
+      0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
+      0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
+      0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
+      0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
+      0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
+      0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
+      0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
+      0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
+      0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
+      0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
+      0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
+      0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
+      0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
+      0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
+      0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
+      0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
+      0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
+      0x82, 0xf0, 0x1e, 0x81,
   };
 
-  alignas(16) randen d;
-  memset(d.state, 0, sizeof(d.state));
-  RandenHwAes::Generate(RandenHwAes::GetKeys(), d.state);
+  alignas(16) uint8_t state[RandenTraits::kStateBytes];
+  std::memset(state, 0, sizeof(state));
 
-  uint64_t* id = d.state;
-  for (const auto& elem : kGolden) {
-    auto a = absl::StrFormat("%#x", elem);
-    auto b = absl::StrFormat("%#x", *id++);
-    EXPECT_EQ(a, b);
-  }
+  RandenHwAes::Generate(RandenHwAes::GetKeys(), state);
+  EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
 }
 
 }  // namespace
@@ -72,32 +67,32 @@
 int main(int argc, char* argv[]) {
   testing::InitGoogleTest(&argc, argv);
 
-  ABSL_RAW_LOG(INFO, "ABSL_HAVE_ACCELERATED_AES=%d", ABSL_HAVE_ACCELERATED_AES);
-  ABSL_RAW_LOG(INFO, "ABSL_RANDOM_INTERNAL_AES_DISPATCH=%d",
-               ABSL_RANDOM_INTERNAL_AES_DISPATCH);
+  LOG(INFO) << "ABSL_HAVE_ACCELERATED_AES=" << ABSL_HAVE_ACCELERATED_AES;
+  LOG(INFO) << "ABSL_RANDOM_INTERNAL_AES_DISPATCH="
+            << ABSL_RANDOM_INTERNAL_AES_DISPATCH;
 
 #if defined(ABSL_ARCH_X86_64)
-  ABSL_RAW_LOG(INFO, "ABSL_ARCH_X86_64");
+  LOG(INFO) << "ABSL_ARCH_X86_64";
 #elif defined(ABSL_ARCH_X86_32)
-  ABSL_RAW_LOG(INFO, "ABSL_ARCH_X86_32");
+  LOG(INFO) << "ABSL_ARCH_X86_32";
 #elif defined(ABSL_ARCH_AARCH64)
-  ABSL_RAW_LOG(INFO, "ABSL_ARCH_AARCH64");
+  LOG(INFO) << "ABSL_ARCH_AARCH64";
 #elif defined(ABSL_ARCH_ARM)
-  ABSL_RAW_LOG(INFO, "ABSL_ARCH_ARM");
+  LOG(INFO) << "ABSL_ARCH_ARM";
 #elif defined(ABSL_ARCH_PPC)
-  ABSL_RAW_LOG(INFO, "ABSL_ARCH_PPC");
+  LOG(INFO) << "ABSL_ARCH_PPC";
 #else
-  ABSL_RAW_LOG(INFO, "ARCH Unknown");
+  LOG(INFO) << "ARCH Unknown";
 #endif
 
   int x = absl::random_internal::HasRandenHwAesImplementation();
-  ABSL_RAW_LOG(INFO, "HasRandenHwAesImplementation = %d", x);
+  LOG(INFO) << "HasRandenHwAesImplementation = " << x;
 
   int y = absl::random_internal::CPUSupportsRandenHwAes();
-  ABSL_RAW_LOG(INFO, "CPUSupportsRandenHwAes = %d", x);
+  LOG(INFO) << "CPUSupportsRandenHwAes = " << x;
 
   if (!x || !y) {
-    ABSL_RAW_LOG(INFO, "Skipping Randen HWAES tests.");
+    LOG(INFO) << "Skipping Randen HWAES tests.";
     return 0;
   }
   return RUN_ALL_TESTS();
diff --git a/abseil-cpp/absl/random/internal/randen_slow.cc b/abseil-cpp/absl/random/internal/randen_slow.cc
index 4e5f3dc..9bfd2a4 100644
--- a/abseil-cpp/absl/random/internal/randen_slow.cc
+++ b/abseil-cpp/absl/random/internal/randen_slow.cc
@@ -19,6 +19,8 @@
 #include <cstring>
 
 #include "absl/base/attributes.h"
+#include "absl/base/internal/endian.h"
+#include "absl/numeric/int128.h"
 #include "absl/random/internal/platform.h"
 #include "absl/random/internal/randen_traits.h"
 
@@ -38,192 +40,193 @@
 namespace {
 
 // AES portions based on rijndael-alg-fst.c,
-// https://fastcrypto.org/front/misc/rijndael-alg-fst.c
+// https://fastcrypto.org/front/misc/rijndael-alg-fst.c, and modified for
+// platform-endianness.
 //
 // Implementation of
 // http://www.csrc.nist.gov/publications/fips/fips197/fips-197.pdf
 constexpr uint32_t te0[256] = {
-    0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd,
-    0xde6f6fb1, 0x91c5c554, 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
-    0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a, 0x8fcaca45, 0x1f82829d,
-    0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
-    0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7,
-    0xe4727296, 0x9bc0c05b, 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
-    0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f, 0x6834345c, 0x51a5a5f4,
-    0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
-    0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1,
-    0x0a05050f, 0x2f9a9ab5, 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
-    0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f, 0x1209091b, 0x1d83839e,
-    0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
-    0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e,
-    0x5e2f2f71, 0x13848497, 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
-    0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed, 0xd46a6abe, 0x8dcbcb46,
-    0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
-    0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7,
-    0x66333355, 0x11858594, 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
-    0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3, 0xa25151f3, 0x5da3a3fe,
-    0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
-    0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a,
-    0xfdf3f30e, 0xbfd2d26d, 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
-    0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739, 0x93c4c457, 0x55a7a7f2,
-    0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
-    0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e,
-    0x3b9090ab, 0x0b888883, 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
-    0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76, 0xdbe0e03b, 0x64323256,
-    0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
-    0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4,
-    0xd3e4e437, 0xf279798b, 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
-    0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0, 0xd86c6cb4, 0xac5656fa,
-    0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
-    0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1,
-    0x73b4b4c7, 0x97c6c651, 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
-    0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85, 0xe0707090, 0x7c3e3e42,
-    0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
-    0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158,
-    0x3a1d1d27, 0x279e9eb9, 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
-    0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7, 0x2d9b9bb6, 0x3c1e1e22,
-    0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
-    0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631,
-    0x844242c6, 0xd06868b8, 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
-    0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a,
+    0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, 0x0df2f2ff, 0xbd6b6bd6,
+    0xb16f6fde, 0x54c5c591, 0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56,
+    0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec, 0x45caca8f, 0x9d82821f,
+    0x40c9c989, 0x877d7dfa, 0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb,
+    0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45, 0xbf9c9c23, 0xf7a4a453,
+    0x967272e4, 0x5bc0c09b, 0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c,
+    0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83, 0x5c343468, 0xf4a5a551,
+    0x34e5e5d1, 0x08f1f1f9, 0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a,
+    0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d, 0x28181830, 0xa1969637,
+    0x0f05050a, 0xb59a9a2f, 0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df,
+    0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea, 0x1b090912, 0x9e83831d,
+    0x742c2c58, 0x2e1a1a34, 0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b,
+    0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d, 0x7b292952, 0x3ee3e3dd,
+    0x712f2f5e, 0x97848413, 0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1,
+    0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6, 0xbe6a6ad4, 0x46cbcb8d,
+    0xd9bebe67, 0x4b393972, 0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85,
+    0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed, 0xc5434386, 0xd74d4d9a,
+    0x55333366, 0x94858511, 0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe,
+    0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b, 0xf35151a2, 0xfea3a35d,
+    0xc0404080, 0x8a8f8f05, 0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1,
+    0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142, 0x30101020, 0x1affffe5,
+    0x0ef3f3fd, 0x6dd2d2bf, 0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3,
+    0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e, 0x57c4c493, 0xf2a7a755,
+    0x827e7efc, 0x473d3d7a, 0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6,
+    0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3, 0x66222244, 0x7e2a2a54,
+    0xab90903b, 0x8388880b, 0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428,
+    0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad, 0x3be0e0db, 0x56323264,
+    0x4e3a3a74, 0x1e0a0a14, 0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8,
+    0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4, 0xa8919139, 0xa4959531,
+    0x37e4e4d3, 0x8b7979f2, 0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda,
+    0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949, 0xb46c6cd8, 0xfa5656ac,
+    0x07f4f4f3, 0x25eaeacf, 0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810,
+    0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c, 0x241c1c38, 0xf1a6a657,
+    0xc7b4b473, 0x51c6c697, 0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e,
+    0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f, 0x907070e0, 0x423e3e7c,
+    0xc4b5b571, 0xaa6666cc, 0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c,
+    0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969, 0x91868617, 0x58c1c199,
+    0x271d1d3a, 0xb99e9e27, 0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122,
+    0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433, 0xb69b9b2d, 0x221e1e3c,
+    0x92878715, 0x20e9e9c9, 0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5,
+    0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a, 0xdabfbf65, 0x31e6e6d7,
+    0xc6424284, 0xb86868d0, 0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e,
+    0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c,
 };
 
 constexpr uint32_t te1[256] = {
-    0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b,
-    0xb1de6f6f, 0x5491c5c5, 0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b,
-    0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676, 0x458fcaca, 0x9d1f8282,
-    0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0,
-    0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4,
-    0x96e47272, 0x5b9bc0c0, 0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626,
-    0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc, 0x5c683434, 0xf451a5a5,
-    0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515,
-    0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696,
-    0x0f0a0505, 0xb52f9a9a, 0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2,
-    0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575, 0x1b120909, 0x9e1d8383,
-    0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0,
-    0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3,
-    0x715e2f2f, 0x97138484, 0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded,
-    0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b, 0xbed46a6a, 0x468dcbcb,
-    0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf,
-    0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d,
-    0x55663333, 0x94118585, 0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f,
-    0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8, 0xf3a25151, 0xfe5da3a3,
-    0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5,
-    0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff,
-    0x0efdf3f3, 0x6dbfd2d2, 0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec,
-    0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717, 0x5793c4c4, 0xf255a7a7,
-    0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373,
-    0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a,
-    0xab3b9090, 0x830b8888, 0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414,
-    0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb, 0x3bdbe0e0, 0x56643232,
-    0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c,
-    0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595,
-    0x37d3e4e4, 0x8bf27979, 0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d,
-    0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9, 0xb4d86c6c, 0xfaac5656,
-    0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808,
-    0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6,
-    0xc773b4b4, 0x5197c6c6, 0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f,
-    0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a, 0x90e07070, 0x427c3e3e,
-    0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e,
-    0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1,
-    0x273a1d1d, 0xb9279e9e, 0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111,
-    0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494, 0xb62d9b9b, 0x223c1e1e,
-    0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf,
-    0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6,
-    0xc6844242, 0xb8d06868, 0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f,
-    0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616,
+    0x6363c6a5, 0x7c7cf884, 0x7777ee99, 0x7b7bf68d, 0xf2f2ff0d, 0x6b6bd6bd,
+    0x6f6fdeb1, 0xc5c59154, 0x30306050, 0x01010203, 0x6767cea9, 0x2b2b567d,
+    0xfefee719, 0xd7d7b562, 0xabab4de6, 0x7676ec9a, 0xcaca8f45, 0x82821f9d,
+    0xc9c98940, 0x7d7dfa87, 0xfafaef15, 0x5959b2eb, 0x47478ec9, 0xf0f0fb0b,
+    0xadad41ec, 0xd4d4b367, 0xa2a25ffd, 0xafaf45ea, 0x9c9c23bf, 0xa4a453f7,
+    0x7272e496, 0xc0c09b5b, 0xb7b775c2, 0xfdfde11c, 0x93933dae, 0x26264c6a,
+    0x36366c5a, 0x3f3f7e41, 0xf7f7f502, 0xcccc834f, 0x3434685c, 0xa5a551f4,
+    0xe5e5d134, 0xf1f1f908, 0x7171e293, 0xd8d8ab73, 0x31316253, 0x15152a3f,
+    0x0404080c, 0xc7c79552, 0x23234665, 0xc3c39d5e, 0x18183028, 0x969637a1,
+    0x05050a0f, 0x9a9a2fb5, 0x07070e09, 0x12122436, 0x80801b9b, 0xe2e2df3d,
+    0xebebcd26, 0x27274e69, 0xb2b27fcd, 0x7575ea9f, 0x0909121b, 0x83831d9e,
+    0x2c2c5874, 0x1a1a342e, 0x1b1b362d, 0x6e6edcb2, 0x5a5ab4ee, 0xa0a05bfb,
+    0x5252a4f6, 0x3b3b764d, 0xd6d6b761, 0xb3b37dce, 0x2929527b, 0xe3e3dd3e,
+    0x2f2f5e71, 0x84841397, 0x5353a6f5, 0xd1d1b968, 0x00000000, 0xededc12c,
+    0x20204060, 0xfcfce31f, 0xb1b179c8, 0x5b5bb6ed, 0x6a6ad4be, 0xcbcb8d46,
+    0xbebe67d9, 0x3939724b, 0x4a4a94de, 0x4c4c98d4, 0x5858b0e8, 0xcfcf854a,
+    0xd0d0bb6b, 0xefefc52a, 0xaaaa4fe5, 0xfbfbed16, 0x434386c5, 0x4d4d9ad7,
+    0x33336655, 0x85851194, 0x45458acf, 0xf9f9e910, 0x02020406, 0x7f7ffe81,
+    0x5050a0f0, 0x3c3c7844, 0x9f9f25ba, 0xa8a84be3, 0x5151a2f3, 0xa3a35dfe,
+    0x404080c0, 0x8f8f058a, 0x92923fad, 0x9d9d21bc, 0x38387048, 0xf5f5f104,
+    0xbcbc63df, 0xb6b677c1, 0xdadaaf75, 0x21214263, 0x10102030, 0xffffe51a,
+    0xf3f3fd0e, 0xd2d2bf6d, 0xcdcd814c, 0x0c0c1814, 0x13132635, 0xececc32f,
+    0x5f5fbee1, 0x979735a2, 0x444488cc, 0x17172e39, 0xc4c49357, 0xa7a755f2,
+    0x7e7efc82, 0x3d3d7a47, 0x6464c8ac, 0x5d5dbae7, 0x1919322b, 0x7373e695,
+    0x6060c0a0, 0x81811998, 0x4f4f9ed1, 0xdcdca37f, 0x22224466, 0x2a2a547e,
+    0x90903bab, 0x88880b83, 0x46468cca, 0xeeeec729, 0xb8b86bd3, 0x1414283c,
+    0xdedea779, 0x5e5ebce2, 0x0b0b161d, 0xdbdbad76, 0xe0e0db3b, 0x32326456,
+    0x3a3a744e, 0x0a0a141e, 0x494992db, 0x06060c0a, 0x2424486c, 0x5c5cb8e4,
+    0xc2c29f5d, 0xd3d3bd6e, 0xacac43ef, 0x6262c4a6, 0x919139a8, 0x959531a4,
+    0xe4e4d337, 0x7979f28b, 0xe7e7d532, 0xc8c88b43, 0x37376e59, 0x6d6ddab7,
+    0x8d8d018c, 0xd5d5b164, 0x4e4e9cd2, 0xa9a949e0, 0x6c6cd8b4, 0x5656acfa,
+    0xf4f4f307, 0xeaeacf25, 0x6565caaf, 0x7a7af48e, 0xaeae47e9, 0x08081018,
+    0xbaba6fd5, 0x7878f088, 0x25254a6f, 0x2e2e5c72, 0x1c1c3824, 0xa6a657f1,
+    0xb4b473c7, 0xc6c69751, 0xe8e8cb23, 0xdddda17c, 0x7474e89c, 0x1f1f3e21,
+    0x4b4b96dd, 0xbdbd61dc, 0x8b8b0d86, 0x8a8a0f85, 0x7070e090, 0x3e3e7c42,
+    0xb5b571c4, 0x6666ccaa, 0x484890d8, 0x03030605, 0xf6f6f701, 0x0e0e1c12,
+    0x6161c2a3, 0x35356a5f, 0x5757aef9, 0xb9b969d0, 0x86861791, 0xc1c19958,
+    0x1d1d3a27, 0x9e9e27b9, 0xe1e1d938, 0xf8f8eb13, 0x98982bb3, 0x11112233,
+    0x6969d2bb, 0xd9d9a970, 0x8e8e0789, 0x949433a7, 0x9b9b2db6, 0x1e1e3c22,
+    0x87871592, 0xe9e9c920, 0xcece8749, 0x5555aaff, 0x28285078, 0xdfdfa57a,
+    0x8c8c038f, 0xa1a159f8, 0x89890980, 0x0d0d1a17, 0xbfbf65da, 0xe6e6d731,
+    0x424284c6, 0x6868d0b8, 0x414182c3, 0x999929b0, 0x2d2d5a77, 0x0f0f1e11,
+    0xb0b07bcb, 0x5454a8fc, 0xbbbb6dd6, 0x16162c3a,
 };
 
 constexpr uint32_t te2[256] = {
-    0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b,
-    0x6fb1de6f, 0xc55491c5, 0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b,
-    0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76, 0xca458fca, 0x829d1f82,
-    0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0,
-    0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4,
-    0x7296e472, 0xc05b9bc0, 0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26,
-    0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc, 0x345c6834, 0xa5f451a5,
-    0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15,
-    0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796,
-    0x050f0a05, 0x9ab52f9a, 0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2,
-    0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75, 0x091b1209, 0x839e1d83,
-    0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0,
-    0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3,
-    0x2f715e2f, 0x84971384, 0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed,
-    0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b, 0x6abed46a, 0xcb468dcb,
-    0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf,
-    0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d,
-    0x33556633, 0x85941185, 0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f,
-    0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8, 0x51f3a251, 0xa3fe5da3,
-    0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5,
-    0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff,
-    0xf30efdf3, 0xd26dbfd2, 0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec,
-    0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17, 0xc45793c4, 0xa7f255a7,
-    0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673,
-    0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a,
-    0x90ab3b90, 0x88830b88, 0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814,
-    0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb, 0xe03bdbe0, 0x32566432,
-    0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c,
-    0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195,
-    0xe437d3e4, 0x798bf279, 0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d,
-    0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9, 0x6cb4d86c, 0x56faac56,
-    0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008,
-    0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6,
-    0xb4c773b4, 0xc65197c6, 0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f,
-    0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a, 0x7090e070, 0x3e427c3e,
-    0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e,
-    0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1,
-    0x1d273a1d, 0x9eb9279e, 0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211,
-    0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394, 0x9bb62d9b, 0x1e223c1e,
-    0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df,
-    0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6,
-    0x42c68442, 0x68b8d068, 0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f,
-    0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16,
+    0x63c6a563, 0x7cf8847c, 0x77ee9977, 0x7bf68d7b, 0xf2ff0df2, 0x6bd6bd6b,
+    0x6fdeb16f, 0xc59154c5, 0x30605030, 0x01020301, 0x67cea967, 0x2b567d2b,
+    0xfee719fe, 0xd7b562d7, 0xab4de6ab, 0x76ec9a76, 0xca8f45ca, 0x821f9d82,
+    0xc98940c9, 0x7dfa877d, 0xfaef15fa, 0x59b2eb59, 0x478ec947, 0xf0fb0bf0,
+    0xad41ecad, 0xd4b367d4, 0xa25ffda2, 0xaf45eaaf, 0x9c23bf9c, 0xa453f7a4,
+    0x72e49672, 0xc09b5bc0, 0xb775c2b7, 0xfde11cfd, 0x933dae93, 0x264c6a26,
+    0x366c5a36, 0x3f7e413f, 0xf7f502f7, 0xcc834fcc, 0x34685c34, 0xa551f4a5,
+    0xe5d134e5, 0xf1f908f1, 0x71e29371, 0xd8ab73d8, 0x31625331, 0x152a3f15,
+    0x04080c04, 0xc79552c7, 0x23466523, 0xc39d5ec3, 0x18302818, 0x9637a196,
+    0x050a0f05, 0x9a2fb59a, 0x070e0907, 0x12243612, 0x801b9b80, 0xe2df3de2,
+    0xebcd26eb, 0x274e6927, 0xb27fcdb2, 0x75ea9f75, 0x09121b09, 0x831d9e83,
+    0x2c58742c, 0x1a342e1a, 0x1b362d1b, 0x6edcb26e, 0x5ab4ee5a, 0xa05bfba0,
+    0x52a4f652, 0x3b764d3b, 0xd6b761d6, 0xb37dceb3, 0x29527b29, 0xe3dd3ee3,
+    0x2f5e712f, 0x84139784, 0x53a6f553, 0xd1b968d1, 0x00000000, 0xedc12ced,
+    0x20406020, 0xfce31ffc, 0xb179c8b1, 0x5bb6ed5b, 0x6ad4be6a, 0xcb8d46cb,
+    0xbe67d9be, 0x39724b39, 0x4a94de4a, 0x4c98d44c, 0x58b0e858, 0xcf854acf,
+    0xd0bb6bd0, 0xefc52aef, 0xaa4fe5aa, 0xfbed16fb, 0x4386c543, 0x4d9ad74d,
+    0x33665533, 0x85119485, 0x458acf45, 0xf9e910f9, 0x02040602, 0x7ffe817f,
+    0x50a0f050, 0x3c78443c, 0x9f25ba9f, 0xa84be3a8, 0x51a2f351, 0xa35dfea3,
+    0x4080c040, 0x8f058a8f, 0x923fad92, 0x9d21bc9d, 0x38704838, 0xf5f104f5,
+    0xbc63dfbc, 0xb677c1b6, 0xdaaf75da, 0x21426321, 0x10203010, 0xffe51aff,
+    0xf3fd0ef3, 0xd2bf6dd2, 0xcd814ccd, 0x0c18140c, 0x13263513, 0xecc32fec,
+    0x5fbee15f, 0x9735a297, 0x4488cc44, 0x172e3917, 0xc49357c4, 0xa755f2a7,
+    0x7efc827e, 0x3d7a473d, 0x64c8ac64, 0x5dbae75d, 0x19322b19, 0x73e69573,
+    0x60c0a060, 0x81199881, 0x4f9ed14f, 0xdca37fdc, 0x22446622, 0x2a547e2a,
+    0x903bab90, 0x880b8388, 0x468cca46, 0xeec729ee, 0xb86bd3b8, 0x14283c14,
+    0xdea779de, 0x5ebce25e, 0x0b161d0b, 0xdbad76db, 0xe0db3be0, 0x32645632,
+    0x3a744e3a, 0x0a141e0a, 0x4992db49, 0x060c0a06, 0x24486c24, 0x5cb8e45c,
+    0xc29f5dc2, 0xd3bd6ed3, 0xac43efac, 0x62c4a662, 0x9139a891, 0x9531a495,
+    0xe4d337e4, 0x79f28b79, 0xe7d532e7, 0xc88b43c8, 0x376e5937, 0x6ddab76d,
+    0x8d018c8d, 0xd5b164d5, 0x4e9cd24e, 0xa949e0a9, 0x6cd8b46c, 0x56acfa56,
+    0xf4f307f4, 0xeacf25ea, 0x65caaf65, 0x7af48e7a, 0xae47e9ae, 0x08101808,
+    0xba6fd5ba, 0x78f08878, 0x254a6f25, 0x2e5c722e, 0x1c38241c, 0xa657f1a6,
+    0xb473c7b4, 0xc69751c6, 0xe8cb23e8, 0xdda17cdd, 0x74e89c74, 0x1f3e211f,
+    0x4b96dd4b, 0xbd61dcbd, 0x8b0d868b, 0x8a0f858a, 0x70e09070, 0x3e7c423e,
+    0xb571c4b5, 0x66ccaa66, 0x4890d848, 0x03060503, 0xf6f701f6, 0x0e1c120e,
+    0x61c2a361, 0x356a5f35, 0x57aef957, 0xb969d0b9, 0x86179186, 0xc19958c1,
+    0x1d3a271d, 0x9e27b99e, 0xe1d938e1, 0xf8eb13f8, 0x982bb398, 0x11223311,
+    0x69d2bb69, 0xd9a970d9, 0x8e07898e, 0x9433a794, 0x9b2db69b, 0x1e3c221e,
+    0x87159287, 0xe9c920e9, 0xce8749ce, 0x55aaff55, 0x28507828, 0xdfa57adf,
+    0x8c038f8c, 0xa159f8a1, 0x89098089, 0x0d1a170d, 0xbf65dabf, 0xe6d731e6,
+    0x4284c642, 0x68d0b868, 0x4182c341, 0x9929b099, 0x2d5a772d, 0x0f1e110f,
+    0xb07bcbb0, 0x54a8fc54, 0xbb6dd6bb, 0x162c3a16,
 };
 
 constexpr uint32_t te3[256] = {
-    0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6,
-    0x6f6fb1de, 0xc5c55491, 0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56,
-    0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec, 0xcaca458f, 0x82829d1f,
-    0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb,
-    0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753,
-    0x727296e4, 0xc0c05b9b, 0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c,
-    0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83, 0x34345c68, 0xa5a5f451,
-    0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a,
-    0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137,
-    0x05050f0a, 0x9a9ab52f, 0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf,
-    0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea, 0x09091b12, 0x83839e1d,
-    0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b,
-    0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd,
-    0x2f2f715e, 0x84849713, 0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1,
-    0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6, 0x6a6abed4, 0xcbcb468d,
-    0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85,
-    0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a,
-    0x33335566, 0x85859411, 0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe,
-    0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b, 0x5151f3a2, 0xa3a3fe5d,
-    0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1,
-    0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5,
-    0xf3f30efd, 0xd2d26dbf, 0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3,
-    0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e, 0xc4c45793, 0xa7a7f255,
-    0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6,
-    0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54,
-    0x9090ab3b, 0x8888830b, 0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28,
-    0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad, 0xe0e03bdb, 0x32325664,
-    0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8,
-    0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431,
-    0xe4e437d3, 0x79798bf2, 0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da,
-    0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049, 0x6c6cb4d8, 0x5656faac,
-    0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810,
-    0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157,
-    0xb4b4c773, 0xc6c65197, 0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e,
-    0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f, 0x707090e0, 0x3e3e427c,
-    0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c,
-    0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899,
-    0x1d1d273a, 0x9e9eb927, 0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322,
-    0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733, 0x9b9bb62d, 0x1e1e223c,
-    0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5,
-    0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7,
-    0x4242c684, 0x6868b8d0, 0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e,
-    0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c,
+    0xc6a56363, 0xf8847c7c, 0xee997777, 0xf68d7b7b, 0xff0df2f2, 0xd6bd6b6b,
+    0xdeb16f6f, 0x9154c5c5, 0x60503030, 0x02030101, 0xcea96767, 0x567d2b2b,
+    0xe719fefe, 0xb562d7d7, 0x4de6abab, 0xec9a7676, 0x8f45caca, 0x1f9d8282,
+    0x8940c9c9, 0xfa877d7d, 0xef15fafa, 0xb2eb5959, 0x8ec94747, 0xfb0bf0f0,
+    0x41ecadad, 0xb367d4d4, 0x5ffda2a2, 0x45eaafaf, 0x23bf9c9c, 0x53f7a4a4,
+    0xe4967272, 0x9b5bc0c0, 0x75c2b7b7, 0xe11cfdfd, 0x3dae9393, 0x4c6a2626,
+    0x6c5a3636, 0x7e413f3f, 0xf502f7f7, 0x834fcccc, 0x685c3434, 0x51f4a5a5,
+    0xd134e5e5, 0xf908f1f1, 0xe2937171, 0xab73d8d8, 0x62533131, 0x2a3f1515,
+    0x080c0404, 0x9552c7c7, 0x46652323, 0x9d5ec3c3, 0x30281818, 0x37a19696,
+    0x0a0f0505, 0x2fb59a9a, 0x0e090707, 0x24361212, 0x1b9b8080, 0xdf3de2e2,
+    0xcd26ebeb, 0x4e692727, 0x7fcdb2b2, 0xea9f7575, 0x121b0909, 0x1d9e8383,
+    0x58742c2c, 0x342e1a1a, 0x362d1b1b, 0xdcb26e6e, 0xb4ee5a5a, 0x5bfba0a0,
+    0xa4f65252, 0x764d3b3b, 0xb761d6d6, 0x7dceb3b3, 0x527b2929, 0xdd3ee3e3,
+    0x5e712f2f, 0x13978484, 0xa6f55353, 0xb968d1d1, 0x00000000, 0xc12ceded,
+    0x40602020, 0xe31ffcfc, 0x79c8b1b1, 0xb6ed5b5b, 0xd4be6a6a, 0x8d46cbcb,
+    0x67d9bebe, 0x724b3939, 0x94de4a4a, 0x98d44c4c, 0xb0e85858, 0x854acfcf,
+    0xbb6bd0d0, 0xc52aefef, 0x4fe5aaaa, 0xed16fbfb, 0x86c54343, 0x9ad74d4d,
+    0x66553333, 0x11948585, 0x8acf4545, 0xe910f9f9, 0x04060202, 0xfe817f7f,
+    0xa0f05050, 0x78443c3c, 0x25ba9f9f, 0x4be3a8a8, 0xa2f35151, 0x5dfea3a3,
+    0x80c04040, 0x058a8f8f, 0x3fad9292, 0x21bc9d9d, 0x70483838, 0xf104f5f5,
+    0x63dfbcbc, 0x77c1b6b6, 0xaf75dada, 0x42632121, 0x20301010, 0xe51affff,
+    0xfd0ef3f3, 0xbf6dd2d2, 0x814ccdcd, 0x18140c0c, 0x26351313, 0xc32fecec,
+    0xbee15f5f, 0x35a29797, 0x88cc4444, 0x2e391717, 0x9357c4c4, 0x55f2a7a7,
+    0xfc827e7e, 0x7a473d3d, 0xc8ac6464, 0xbae75d5d, 0x322b1919, 0xe6957373,
+    0xc0a06060, 0x19988181, 0x9ed14f4f, 0xa37fdcdc, 0x44662222, 0x547e2a2a,
+    0x3bab9090, 0x0b838888, 0x8cca4646, 0xc729eeee, 0x6bd3b8b8, 0x283c1414,
+    0xa779dede, 0xbce25e5e, 0x161d0b0b, 0xad76dbdb, 0xdb3be0e0, 0x64563232,
+    0x744e3a3a, 0x141e0a0a, 0x92db4949, 0x0c0a0606, 0x486c2424, 0xb8e45c5c,
+    0x9f5dc2c2, 0xbd6ed3d3, 0x43efacac, 0xc4a66262, 0x39a89191, 0x31a49595,
+    0xd337e4e4, 0xf28b7979, 0xd532e7e7, 0x8b43c8c8, 0x6e593737, 0xdab76d6d,
+    0x018c8d8d, 0xb164d5d5, 0x9cd24e4e, 0x49e0a9a9, 0xd8b46c6c, 0xacfa5656,
+    0xf307f4f4, 0xcf25eaea, 0xcaaf6565, 0xf48e7a7a, 0x47e9aeae, 0x10180808,
+    0x6fd5baba, 0xf0887878, 0x4a6f2525, 0x5c722e2e, 0x38241c1c, 0x57f1a6a6,
+    0x73c7b4b4, 0x9751c6c6, 0xcb23e8e8, 0xa17cdddd, 0xe89c7474, 0x3e211f1f,
+    0x96dd4b4b, 0x61dcbdbd, 0x0d868b8b, 0x0f858a8a, 0xe0907070, 0x7c423e3e,
+    0x71c4b5b5, 0xccaa6666, 0x90d84848, 0x06050303, 0xf701f6f6, 0x1c120e0e,
+    0xc2a36161, 0x6a5f3535, 0xaef95757, 0x69d0b9b9, 0x17918686, 0x9958c1c1,
+    0x3a271d1d, 0x27b99e9e, 0xd938e1e1, 0xeb13f8f8, 0x2bb39898, 0x22331111,
+    0xd2bb6969, 0xa970d9d9, 0x07898e8e, 0x33a79494, 0x2db69b9b, 0x3c221e1e,
+    0x15928787, 0xc920e9e9, 0x8749cece, 0xaaff5555, 0x50782828, 0xa57adfdf,
+    0x038f8c8c, 0x59f8a1a1, 0x09808989, 0x1a170d0d, 0x65dabfbf, 0xd731e6e6,
+    0x84c64242, 0xd0b86868, 0x82c34141, 0x29b09999, 0x5a772d2d, 0x1e110f0f,
+    0x7bcbb0b0, 0xa8fc5454, 0x6dd6bbbb, 0x2c3a1616,
 };
 
 // Software implementation of the Vector128 class, using uint32_t
@@ -235,45 +238,13 @@
 inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* from) {
   Vector128 result;
-  const uint8_t* src = reinterpret_cast<const uint8_t*>(from);
-  result.s[0] = static_cast<uint32_t>(src[0]) << 24 |
-                static_cast<uint32_t>(src[1]) << 16 |
-                static_cast<uint32_t>(src[2]) << 8 |
-                static_cast<uint32_t>(src[3]);
-  result.s[1] = static_cast<uint32_t>(src[4]) << 24 |
-                static_cast<uint32_t>(src[5]) << 16 |
-                static_cast<uint32_t>(src[6]) << 8 |
-                static_cast<uint32_t>(src[7]);
-  result.s[2] = static_cast<uint32_t>(src[8]) << 24 |
-                static_cast<uint32_t>(src[9]) << 16 |
-                static_cast<uint32_t>(src[10]) << 8 |
-                static_cast<uint32_t>(src[11]);
-  result.s[3] = static_cast<uint32_t>(src[12]) << 24 |
-                static_cast<uint32_t>(src[13]) << 16 |
-                static_cast<uint32_t>(src[14]) << 8 |
-                static_cast<uint32_t>(src[15]);
+  std::memcpy(result.s, from, sizeof(Vector128));
   return result;
 }
 
 inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
     const Vector128& v, void* to) {
-  uint8_t* dst = reinterpret_cast<uint8_t*>(to);
-  dst[0] = static_cast<uint8_t>(v.s[0] >> 24);
-  dst[1] = static_cast<uint8_t>(v.s[0] >> 16);
-  dst[2] = static_cast<uint8_t>(v.s[0] >> 8);
-  dst[3] = static_cast<uint8_t>(v.s[0]);
-  dst[4] = static_cast<uint8_t>(v.s[1] >> 24);
-  dst[5] = static_cast<uint8_t>(v.s[1] >> 16);
-  dst[6] = static_cast<uint8_t>(v.s[1] >> 8);
-  dst[7] = static_cast<uint8_t>(v.s[1]);
-  dst[8] = static_cast<uint8_t>(v.s[2] >> 24);
-  dst[9] = static_cast<uint8_t>(v.s[2] >> 16);
-  dst[10] = static_cast<uint8_t>(v.s[2] >> 8);
-  dst[11] = static_cast<uint8_t>(v.s[2]);
-  dst[12] = static_cast<uint8_t>(v.s[3] >> 24);
-  dst[13] = static_cast<uint8_t>(v.s[3] >> 16);
-  dst[14] = static_cast<uint8_t>(v.s[3] >> 8);
-  dst[15] = static_cast<uint8_t>(v.s[3]);
+  std::memcpy(to, v.s, sizeof(Vector128));
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
@@ -281,39 +252,57 @@
 inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   Vector128 result;
+#ifdef ABSL_IS_LITTLE_ENDIAN
   result.s[0] = round_key.s[0] ^                  //
-                te0[uint8_t(state.s[0] >> 24)] ^  //
-                te1[uint8_t(state.s[1] >> 16)] ^  //
-                te2[uint8_t(state.s[2] >> 8)] ^   //
-                te3[uint8_t(state.s[3])];
+                te0[uint8_t(state.s[0])] ^        //
+                te1[uint8_t(state.s[1] >> 8)] ^   //
+                te2[uint8_t(state.s[2] >> 16)] ^  //
+                te3[uint8_t(state.s[3] >> 24)];
   result.s[1] = round_key.s[1] ^                  //
-                te0[uint8_t(state.s[1] >> 24)] ^  //
-                te1[uint8_t(state.s[2] >> 16)] ^  //
-                te2[uint8_t(state.s[3] >> 8)] ^   //
-                te3[uint8_t(state.s[0])];
+                te0[uint8_t(state.s[1])] ^        //
+                te1[uint8_t(state.s[2] >> 8)] ^   //
+                te2[uint8_t(state.s[3] >> 16)] ^  //
+                te3[uint8_t(state.s[0] >> 24)];
   result.s[2] = round_key.s[2] ^                  //
-                te0[uint8_t(state.s[2] >> 24)] ^  //
-                te1[uint8_t(state.s[3] >> 16)] ^  //
-                te2[uint8_t(state.s[0] >> 8)] ^   //
-                te3[uint8_t(state.s[1])];
+                te0[uint8_t(state.s[2])] ^        //
+                te1[uint8_t(state.s[3] >> 8)] ^   //
+                te2[uint8_t(state.s[0] >> 16)] ^  //
+                te3[uint8_t(state.s[1] >> 24)];
   result.s[3] = round_key.s[3] ^                  //
-                te0[uint8_t(state.s[3] >> 24)] ^  //
-                te1[uint8_t(state.s[0] >> 16)] ^  //
-                te2[uint8_t(state.s[1] >> 8)] ^   //
-                te3[uint8_t(state.s[2])];
+                te0[uint8_t(state.s[3])] ^        //
+                te1[uint8_t(state.s[0] >> 8)] ^   //
+                te2[uint8_t(state.s[1] >> 16)] ^  //
+                te3[uint8_t(state.s[2] >> 24)];
+#else
+  result.s[0] = round_key.s[0] ^                  //
+                te0[uint8_t(state.s[0])] ^        //
+                te1[uint8_t(state.s[3] >> 8)] ^   //
+                te2[uint8_t(state.s[2] >> 16)] ^  //
+                te3[uint8_t(state.s[1] >> 24)];
+  result.s[1] = round_key.s[1] ^                  //
+                te0[uint8_t(state.s[1])] ^        //
+                te1[uint8_t(state.s[0] >> 8)] ^   //
+                te2[uint8_t(state.s[3] >> 16)] ^  //
+                te3[uint8_t(state.s[2] >> 24)];
+  result.s[2] = round_key.s[2] ^                  //
+                te0[uint8_t(state.s[2])] ^        //
+                te1[uint8_t(state.s[1] >> 8)] ^   //
+                te2[uint8_t(state.s[0] >> 16)] ^  //
+                te3[uint8_t(state.s[3] >> 24)];
+  result.s[3] = round_key.s[3] ^                  //
+                te0[uint8_t(state.s[3])] ^        //
+                te1[uint8_t(state.s[2] >> 8)] ^   //
+                te2[uint8_t(state.s[1] >> 16)] ^  //
+                te3[uint8_t(state.s[0] >> 24)];
+#endif
   return result;
 }
 
 using ::absl::random_internal::RandenTraits;
 
-// Randen operates on 128-bit vectors.
-struct alignas(16) u64x2 {
-  uint64_t data[2];
-};
-
 // The improved Feistel block shuffle function for 16 blocks.
 inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
-    u64x2* state) {
+    absl::uint128* state) {
   static_assert(RandenTraits::kFeistelBlocks == 16,
                 "Feistel block shuffle only works for 16 blocks.");
 
@@ -323,31 +312,31 @@
   // The fully unrolled loop without the memcpy improves the speed by about
   // 30% over the equivalent:
 #if 0
-  u64x2 source[RandenTraits::kFeistelBlocks];
+  absl::uint128 source[RandenTraits::kFeistelBlocks];
   std::memcpy(source, state, sizeof(source));
   for (size_t i = 0; i < RandenTraits::kFeistelBlocks; i++) {
-    const u64x2 v0 = source[shuffle[i]];
+    const absl::uint128 v0 = source[shuffle[i]];
     state[i] = v0;
   }
   return;
 #endif
 
-  const u64x2 v0 = state[shuffle[0]];
-  const u64x2 v1 = state[shuffle[1]];
-  const u64x2 v2 = state[shuffle[2]];
-  const u64x2 v3 = state[shuffle[3]];
-  const u64x2 v4 = state[shuffle[4]];
-  const u64x2 v5 = state[shuffle[5]];
-  const u64x2 v6 = state[shuffle[6]];
-  const u64x2 v7 = state[shuffle[7]];
-  const u64x2 w0 = state[shuffle[8]];
-  const u64x2 w1 = state[shuffle[9]];
-  const u64x2 w2 = state[shuffle[10]];
-  const u64x2 w3 = state[shuffle[11]];
-  const u64x2 w4 = state[shuffle[12]];
-  const u64x2 w5 = state[shuffle[13]];
-  const u64x2 w6 = state[shuffle[14]];
-  const u64x2 w7 = state[shuffle[15]];
+  const absl::uint128 v0 = state[shuffle[0]];
+  const absl::uint128 v1 = state[shuffle[1]];
+  const absl::uint128 v2 = state[shuffle[2]];
+  const absl::uint128 v3 = state[shuffle[3]];
+  const absl::uint128 v4 = state[shuffle[4]];
+  const absl::uint128 v5 = state[shuffle[5]];
+  const absl::uint128 v6 = state[shuffle[6]];
+  const absl::uint128 v7 = state[shuffle[7]];
+  const absl::uint128 w0 = state[shuffle[8]];
+  const absl::uint128 w1 = state[shuffle[9]];
+  const absl::uint128 w2 = state[shuffle[10]];
+  const absl::uint128 w3 = state[shuffle[11]];
+  const absl::uint128 w4 = state[shuffle[12]];
+  const absl::uint128 w5 = state[shuffle[13]];
+  const absl::uint128 w6 = state[shuffle[14]];
+  const absl::uint128 w7 = state[shuffle[15]];
   state[0] = v0;
   state[1] = v1;
   state[2] = v2;
@@ -371,9 +360,9 @@
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
-    u64x2* ABSL_RANDOM_INTERNAL_RESTRICT state,
-    const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE const absl::uint128*
+FeistelRound(absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT state,
+             const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   for (size_t branch = 0; branch < RandenTraits::kFeistelBlocks; branch += 4) {
     const Vector128 s0 = Vector128Load(state + branch);
     const Vector128 s1 = Vector128Load(state + branch + 1);
@@ -398,13 +387,31 @@
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
 inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
-    u64x2* state, const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+    absl::uint128* state,
+    const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   for (size_t round = 0; round < RandenTraits::kFeistelRounds; ++round) {
     keys = FeistelRound(state, keys);
     BlockShuffle(state);
   }
 }
 
+// Enables native loads in the round loop by pre-swapping.
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
+    absl::uint128* state) {
+#ifdef ABSL_IS_BIG_ENDIAN
+  for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
+    uint64_t new_lo = absl::little_endian::ToHost64(
+        static_cast<uint64_t>(state[block] >> 64));
+    uint64_t new_hi = absl::little_endian::ToHost64(
+        static_cast<uint64_t>((state[block] << 64) >> 64));
+    state[block] = (static_cast<absl::uint128>(new_hi) << 64) | new_lo;
+  }
+#else
+  // Avoid warning about unused variable.
+  (void)state;
+#endif
+}
+
 }  // namespace
 
 namespace absl {
@@ -414,7 +421,11 @@
 const void* RandenSlow::GetKeys() {
   // Round keys for one AES per Feistel round and branch.
   // The canonical implementation uses first digits of Pi.
+#ifdef ABSL_IS_LITTLE_ENDIAN
   return kRandenRoundKeys;
+#else
+  return kRandenRoundKeysBE;
+#endif
 }
 
 void RandenSlow::Absorb(const void* seed_void, void* state_void) {
@@ -437,19 +448,22 @@
 }
 
 void RandenSlow::Generate(const void* keys_void, void* state_void) {
-  static_assert(RandenTraits::kCapacityBytes == sizeof(u64x2),
+  static_assert(RandenTraits::kCapacityBytes == sizeof(absl::uint128),
                 "Capacity mismatch");
 
-  auto* state = reinterpret_cast<u64x2*>(state_void);
-  const auto* keys = reinterpret_cast<const u64x2*>(keys_void);
+  auto* state = reinterpret_cast<absl::uint128*>(state_void);
+  const auto* keys = reinterpret_cast<const absl::uint128*>(keys_void);
 
-  const u64x2 prev_inner = state[0];
+  const absl::uint128 prev_inner = state[0];
+
+  SwapEndian(state);
 
   Permute(state, keys);
 
+  SwapEndian(state);
+
   // Ensure backtracking resistance.
-  state[0].data[0] ^= prev_inner.data[0];
-  state[0].data[1] ^= prev_inner.data[1];
+  *state ^= prev_inner;
 }
 
 }  // namespace random_internal
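
The two hunks above replace the hand-rolled big-endian byte assembly in Vector128Load/Vector128Store with a plain std::memcpy in native byte order, and compensate on big-endian hosts by pre-swapping each 64-bit half of the state in SwapEndian(). A minimal standalone sketch of that load-and-swap idea follows, using illustrative names and a hand-rolled byte swap instead of absl::little_endian::ToHost64:

#include <cstdint>
#include <cstdio>
#include <cstring>

namespace {

struct Vec128 {
  uint32_t s[4];
};

// Native-order load: the lanes hold whatever byte order the host uses.
Vec128 Load(const void* from) {
  Vec128 v;
  std::memcpy(v.s, from, sizeof(v.s));
  return v;
}

// Portable 64-bit byte swap; stands in for absl::little_endian::ToHost64 on a
// big-endian host.
uint64_t ByteSwap64(uint64_t x) {
  uint64_t r = 0;
  for (int i = 0; i < 8; ++i) {
    r = (r << 8) | ((x >> (8 * i)) & 0xff);
  }
  return r;
}

}  // namespace

int main() {
  const uint8_t bytes[16] = {0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c,
                             0xc6, 0x33, 0x53, 0x9d, 0x9b, 0x2b, 0x4e, 0x04};
  const Vec128 v = Load(bytes);
  std::printf("s[0] = 0x%08x\n", v.s[0]);  // 0x0ee6d3ee on a little-endian host
  std::printf("ByteSwap64(0x0123456789abcdef) = 0x%016llx\n",
              static_cast<unsigned long long>(ByteSwap64(0x0123456789abcdefULL)));
  return 0;
}
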
diff --git a/abseil-cpp/absl/random/internal/randen_slow.h b/abseil-cpp/absl/random/internal/randen_slow.h
index b6f137e..532c3a8 100644
--- a/abseil-cpp/absl/random/internal/randen_slow.h
+++ b/abseil-cpp/absl/random/internal/randen_slow.h
@@ -23,7 +23,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-// RANDen = RANDom generator or beetroots in Swiss German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
 // RandenSlow implements the basic state manipulation methods for
 // architectures lacking AES hardware acceleration intrinsics.
 class RandenSlow {
diff --git a/abseil-cpp/absl/random/internal/randen_slow_test.cc b/abseil-cpp/absl/random/internal/randen_slow_test.cc
index 4a53583..ed60395 100644
--- a/abseil-cpp/absl/random/internal/randen_slow_test.cc
+++ b/abseil-cpp/absl/random/internal/randen_slow_test.cc
@@ -17,6 +17,7 @@
 #include <cstring>
 
 #include "gtest/gtest.h"
+#include "absl/base/internal/endian.h"
 #include "absl/random/internal/randen_traits.h"
 
 namespace {
@@ -24,40 +25,37 @@
 using absl::random_internal::RandenSlow;
 using absl::random_internal::RandenTraits;
 
-// Local state parameters.
-constexpr size_t kSeedBytes =
-    RandenTraits::kStateBytes - RandenTraits::kCapacityBytes;
-constexpr size_t kStateSizeT = RandenTraits::kStateBytes / sizeof(uint64_t);
-constexpr size_t kSeedSizeT = kSeedBytes / sizeof(uint32_t);
-
-struct alignas(16) randen {
-  uint64_t state[kStateSizeT];
-  uint32_t seed[kSeedSizeT];
-};
-
 TEST(RandenSlowTest, Default) {
-  constexpr uint64_t kGolden[] = {
-      0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977,
-      0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912,
-      0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6,
-      0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a,
-      0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37,
-      0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556,
-      0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42,
-      0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc,
-      0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f,
-      0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3,
-      0x026ff374c101da7e, 0x811ef0821c3de851,
+  constexpr uint8_t kGolden[] = {
+      0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
+      0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
+      0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
+      0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
+      0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
+      0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
+      0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
+      0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
+      0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
+      0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
+      0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
+      0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
+      0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
+      0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
+      0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
+      0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
+      0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
+      0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
+      0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
+      0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
+      0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
+      0x82, 0xf0, 0x1e, 0x81,
   };
 
-  alignas(16) randen d;
-  std::memset(d.state, 0, sizeof(d.state));
-  RandenSlow::Generate(RandenSlow::GetKeys(), d.state);
+  alignas(16) uint8_t state[RandenTraits::kStateBytes];
+  std::memset(state, 0, sizeof(state));
 
-  uint64_t* id = d.state;
-  for (const auto& elem : kGolden) {
-    EXPECT_EQ(elem, *id++);
-  }
+  RandenSlow::Generate(RandenSlow::GetKeys(), state);
+  EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
 }
 
 }  // namespace
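
The new golden data above is the same sequence as the old uint64_t golden words, spelled out byte-by-byte in little-endian order so the test can memcmp the raw state buffer. A small standalone check of that correspondence for the first word, assuming a little-endian host:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // First word of the old golden array and first 8 bytes of the new one.
  const uint64_t old_word = 0x6c6534090ee6d3ee;
  const uint8_t new_bytes[8] = {0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c};

  uint8_t raw[8];
  std::memcpy(raw, &old_word, sizeof(raw));  // little-endian layout assumed
  assert(std::memcmp(raw, new_bytes, sizeof(raw)) == 0);
  return 0;
}
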
diff --git a/abseil-cpp/absl/random/internal/randen_test.cc b/abseil-cpp/absl/random/internal/randen_test.cc
index c186fe0..92773b8 100644
--- a/abseil-cpp/absl/random/internal/randen_test.cc
+++ b/abseil-cpp/absl/random/internal/randen_test.cc
@@ -23,9 +23,6 @@
 
 using absl::random_internal::Randen;
 
-// Local state parameters.
-constexpr size_t kStateSizeT = Randen::kStateBytes / sizeof(uint64_t);
-
 TEST(RandenTest, CopyAndMove) {
   static_assert(std::is_copy_constructible<Randen>::value,
                 "Randen must be copy constructible");
@@ -41,30 +38,38 @@
 }
 
 TEST(RandenTest, Default) {
-  constexpr uint64_t kGolden[] = {
-      0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977,
-      0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912,
-      0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6,
-      0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a,
-      0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37,
-      0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556,
-      0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42,
-      0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc,
-      0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f,
-      0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3,
-      0x026ff374c101da7e, 0x811ef0821c3de851,
+  constexpr uint8_t kGolden[] = {
+      0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
+      0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
+      0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
+      0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
+      0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
+      0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
+      0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
+      0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
+      0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
+      0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
+      0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
+      0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
+      0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
+      0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
+      0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
+      0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
+      0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
+      0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
+      0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
+      0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
+      0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
+      0x82, 0xf0, 0x1e, 0x81,
   };
 
-  alignas(16) uint64_t state[kStateSizeT];
+  alignas(16) uint8_t state[Randen::kStateBytes];
   std::memset(state, 0, sizeof(state));
 
   Randen r;
   r.Generate(state);
 
-  auto id = std::begin(state);
-  for (const auto& elem : kGolden) {
-    EXPECT_EQ(elem, *id++);
-  }
+  EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
 }
 
 }  // namespace
diff --git a/abseil-cpp/absl/random/internal/randen_traits.h b/abseil-cpp/absl/random/internal/randen_traits.h
index 53caa93..120022c 100644
--- a/abseil-cpp/absl/random/internal/randen_traits.h
+++ b/abseil-cpp/absl/random/internal/randen_traits.h
@@ -28,7 +28,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-// RANDen = RANDom generator or beetroots in Swiss German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
 // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
 // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
 //
diff --git a/abseil-cpp/absl/random/internal/salted_seed_seq.h b/abseil-cpp/absl/random/internal/salted_seed_seq.h
index 5953a09..0629186 100644
--- a/abseil-cpp/absl/random/internal/salted_seed_seq.h
+++ b/abseil-cpp/absl/random/internal/salted_seed_seq.h
@@ -22,6 +22,7 @@
 #include <memory>
 #include <type_traits>
 #include <utility>
+#include <vector>
 
 #include "absl/container/inlined_vector.h"
 #include "absl/meta/type_traits.h"
@@ -65,15 +66,19 @@
 
   template <typename RandomAccessIterator>
   void generate(RandomAccessIterator begin, RandomAccessIterator end) {
+    using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
+
     // The common case is that generate is called with ContiguousIterators
     // to uint arrays. Such contiguous memory regions may be optimized,
     // which we detect here.
-    using tag = absl::conditional_t<
-        (std::is_pointer<RandomAccessIterator>::value &&
-         std::is_same<absl::decay_t<decltype(*begin)>, uint32_t>::value),
+    using TagType = absl::conditional_t<
+        (std::is_same<U, uint32_t>::value &&
+         (std::is_pointer<RandomAccessIterator>::value ||
+          std::is_same<RandomAccessIterator,
+                       typename std::vector<U>::iterator>::value)),
         ContiguousAndUint32Tag, DefaultTag>;
     if (begin != end) {
-      generate_impl(begin, end, tag{});
+      generate_impl(TagType{}, begin, end, std::distance(begin, end));
     }
   }
 
@@ -89,8 +94,15 @@
   struct DefaultTag {};
 
   // Generate which requires the iterators are contiguous pointers to uint32_t.
-  void generate_impl(uint32_t* begin, uint32_t* end, ContiguousAndUint32Tag) {
-    generate_contiguous(absl::MakeSpan(begin, end));
+  // Fills the initial seed buffer via the underlying SSeq::generate() call,
+  // then mixes in the salt material.
+  template <typename Contiguous>
+  void generate_impl(ContiguousAndUint32Tag, Contiguous begin, Contiguous end,
+                     size_t n) {
+    seq_->generate(begin, end);
+    const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
+    auto span = absl::Span<uint32_t>(&*begin, n);
+    MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), span);
   }
 
   // The uncommon case for generate is that it is called with iterators over
@@ -98,27 +110,13 @@
   // case we allocate a temporary 32-bit buffer and then copy-assign back
   // to the initial inputs.
   template <typename RandomAccessIterator>
-  void generate_impl(RandomAccessIterator begin, RandomAccessIterator end,
-                     DefaultTag) {
-    return generate_and_copy(std::distance(begin, end), begin);
-  }
-
-  // Fills the initial seed buffer the underlying SSeq::generate() call,
-  // mixing in the salt material.
-  void generate_contiguous(absl::Span<uint32_t> buffer) {
-    seq_->generate(buffer.begin(), buffer.end());
-    const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
-    MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), buffer);
-  }
-
-  // Allocates a seed buffer of `n` elements, generates the seed, then
-  // copies the result into the `out` iterator.
-  template <typename Iterator>
-  void generate_and_copy(size_t n, Iterator out) {
-    // Allocate a temporary buffer, generate, and then copy.
+  void generate_impl(DefaultTag, RandomAccessIterator begin,
+                     RandomAccessIterator, size_t n) {
+    // Allocates a seed buffer of `n` elements, generates the seed, then
+    // copies the result into the `begin` iterator.
     absl::InlinedVector<uint32_t, 8> data(n, 0);
-    generate_contiguous(absl::MakeSpan(data.data(), data.size()));
-    std::copy(data.begin(), data.end(), out);
+    generate_impl(ContiguousAndUint32Tag{}, data.begin(), data.end(), n);
+    std::copy(data.begin(), data.end(), begin);
   }
 
   // Because [rand.req.seedseq] is not required to be copy-constructible,
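
The rewritten generate() above selects its implementation with tag dispatch: contiguous uint32_t iterators (raw pointers or std::vector<uint32_t>::iterator) take the direct path, anything else is staged through a temporary buffer. A self-contained sketch of that dispatch pattern, with illustrative names and printf in place of the real seed generation:

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <type_traits>
#include <vector>

struct ContiguousAndUint32Tag {};
struct DefaultTag {};

template <typename It>
void GenerateImpl(ContiguousAndUint32Tag, It begin, It end) {
  std::printf("contiguous path: %ld elements\n",
              static_cast<long>(std::distance(begin, end)));
}

template <typename It>
void GenerateImpl(DefaultTag, It begin, It end) {
  std::printf("buffered path: %ld elements\n",
              static_cast<long>(std::distance(begin, end)));
}

template <typename It>
void Generate(It begin, It end) {
  using U = typename std::iterator_traits<It>::value_type;
  using Tag = typename std::conditional<
      std::is_same<U, uint32_t>::value &&
          (std::is_pointer<It>::value ||
           std::is_same<It, typename std::vector<U>::iterator>::value),
      ContiguousAndUint32Tag, DefaultTag>::type;
  if (begin != end) GenerateImpl(Tag{}, begin, end);
}

int main() {
  std::vector<uint32_t> v(4);
  Generate(v.begin(), v.end());  // contiguous path
  std::vector<uint64_t> w(4);
  Generate(w.begin(), w.end());  // buffered path
  return 0;
}
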
diff --git a/abseil-cpp/absl/random/internal/seed_material.cc b/abseil-cpp/absl/random/internal/seed_material.cc
index 4d38a57..1041302 100644
--- a/abseil-cpp/absl/random/internal/seed_material.cc
+++ b/abseil-cpp/absl/random/internal/seed_material.cc
@@ -28,6 +28,7 @@
 #include <cstdlib>
 #include <cstring>
 
+#include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/escaping.h"
@@ -50,6 +51,18 @@
 
 #endif
 
+#if defined(__GLIBC__) && \
+    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 25))
+// glibc >= 2.25 has getentropy()
+#define ABSL_RANDOM_USE_GET_ENTROPY 1
+#endif
+
+#if defined(__EMSCRIPTEN__)
+#include <sys/random.h>
+// Emscripten has getentropy, but it resides in a different header.
+#define ABSL_RANDOM_USE_GET_ENTROPY 1
+#endif
+
 #if defined(ABSL_RANDOM_USE_BCRYPT)
 #include <bcrypt.h>
 
@@ -122,8 +135,32 @@
 
 #else
 
+#if defined(ABSL_RANDOM_USE_GET_ENTROPY)
+// On *nix, use getentropy() if supported. Note that libc may support
+// getentropy(), but the kernel may not, in which case this function will return
+// false.
+bool ReadSeedMaterialFromGetEntropy(absl::Span<uint32_t> values) {
+  auto buffer = reinterpret_cast<uint8_t*>(values.data());
+  size_t buffer_size = sizeof(uint32_t) * values.size();
+  while (buffer_size > 0) {
+    // getentropy() has a maximum permitted length of 256 bytes.
+    size_t to_read = std::min<size_t>(buffer_size, 256);
+    int result = getentropy(buffer, to_read);
+    if (result < 0) {
+      return false;
+    }
+    // https://github.com/google/sanitizers/issues/1173
+    // MemorySanitizer can't see through getentropy().
+    ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buffer, to_read);
+    buffer += to_read;
+    buffer_size -= to_read;
+  }
+  return true;
+}
+#endif  // defined(ABSL_RANDOM_USE_GET_ENTROPY)
+
 // On *nix, read entropy from /dev/urandom.
-bool ReadSeedMaterialFromOSEntropyImpl(absl::Span<uint32_t> values) {
+bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) {
   const char kEntropyFile[] = "/dev/urandom";
 
   auto buffer = reinterpret_cast<uint8_t*>(values.data());
@@ -136,12 +173,12 @@
   }
 
   while (success && buffer_size > 0) {
-    int bytes_read = read(dev_urandom, buffer, buffer_size);
+    ssize_t bytes_read = read(dev_urandom, buffer, buffer_size);
     int read_error = errno;
     success = (bytes_read > 0);
     if (success) {
       buffer += bytes_read;
-      buffer_size -= bytes_read;
+      buffer_size -= static_cast<size_t>(bytes_read);
     } else if (bytes_read == -1 && read_error == EINTR) {
       success = true;  // Need to try again.
     }
@@ -150,6 +187,17 @@
   return success;
 }
 
+bool ReadSeedMaterialFromOSEntropyImpl(absl::Span<uint32_t> values) {
+#if defined(ABSL_RANDOM_USE_GET_ENTROPY)
+  if (ReadSeedMaterialFromGetEntropy(values)) {
+    return true;
+  }
+#endif
+  // Libc may support getentropy, but the kernel may not, so we still have
+  // to fall back to ReadSeedMaterialFromDevURandom().
+  return ReadSeedMaterialFromDevURandom(values);
+}
+
 #endif
 
 }  // namespace
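
The new getentropy() path above has to respect the API's 256-byte per-call ceiling, so larger seed buffers are filled in chunks and any failure drops the caller back to /dev/urandom. A hedged sketch of just the chunking loop, assuming a POSIX system whose <unistd.h> declares getentropy() (glibc >= 2.25):

#include <unistd.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

bool FillWithGetEntropy(uint8_t* buffer, size_t buffer_size) {
  while (buffer_size > 0) {
    const size_t to_read = std::min<size_t>(buffer_size, 256);  // API limit
    if (getentropy(buffer, to_read) < 0) {
      return false;  // caller would fall back to reading /dev/urandom
    }
    buffer += to_read;
    buffer_size -= to_read;
  }
  return true;
}

int main() {
  uint8_t seed[1024];
  std::printf("getentropy ok: %d\n",
              FillWithGetEntropy(seed, sizeof(seed)) ? 1 : 0);
  return 0;
}
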
diff --git a/abseil-cpp/absl/random/internal/traits.h b/abseil-cpp/absl/random/internal/traits.h
index 75772bd..f874a0f 100644
--- a/abseil-cpp/absl/random/internal/traits.h
+++ b/abseil-cpp/absl/random/internal/traits.h
@@ -20,6 +20,8 @@
 #include <type_traits>
 
 #include "absl/base/config.h"
+#include "absl/numeric/bits.h"
+#include "absl/numeric/int128.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -59,6 +61,31 @@
       rank<A>() <= rank<B>();
 };
 
+template <typename T>
+struct IsIntegral : std::is_integral<T> {};
+template <>
+struct IsIntegral<absl::int128> : std::true_type {};
+template <>
+struct IsIntegral<absl::uint128> : std::true_type {};
+
+template <typename T>
+struct MakeUnsigned : std::make_unsigned<T> {};
+template <>
+struct MakeUnsigned<absl::int128> {
+  using type = absl::uint128;
+};
+template <>
+struct MakeUnsigned<absl::uint128> {
+  using type = absl::uint128;
+};
+
+template <typename T>
+struct IsUnsigned : std::is_unsigned<T> {};
+template <>
+struct IsUnsigned<absl::int128> : std::false_type {};
+template <>
+struct IsUnsigned<absl::uint128> : std::true_type {};
+
 // unsigned_bits<N>::type returns the unsigned int type with the indicated
 // number of bits.
 template <size_t N>
@@ -81,19 +108,40 @@
   using type = uint64_t;
 };
 
-#ifdef ABSL_HAVE_INTRINSIC_INT128
 template <>
 struct unsigned_bits<128> {
-  using type = __uint128_t;
+  using type = absl::uint128;
 };
-#endif
+
+// 256-bit wrapper for wide multiplications.
+struct U256 {
+  uint128 hi;
+  uint128 lo;
+};
+template <>
+struct unsigned_bits<256> {
+  using type = U256;
+};
 
 template <typename IntType>
 struct make_unsigned_bits {
-  using type = typename unsigned_bits<std::numeric_limits<
-      typename std::make_unsigned<IntType>::type>::digits>::type;
+  using type = typename unsigned_bits<
+      std::numeric_limits<typename MakeUnsigned<IntType>::type>::digits>::type;
 };
 
+template <typename T>
+int BitWidth(T v) {
+  // Workaround for bit_width not supporting int128.
+  // Don't hardcode `64` to make sure this code does not trigger compiler
+  // warnings in smaller types.
+  constexpr int half_bits = sizeof(T) * 8 / 2;
+  if (sizeof(T) == 16 && (v >> half_bits) != 0) {
+    return bit_width(static_cast<uint64_t>(v >> half_bits)) + half_bits;
+  } else {
+    return bit_width(static_cast<uint64_t>(v));
+  }
+}
+
 }  // namespace random_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
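
BitWidth() above works around bit_width() only being defined for builtin unsigned types by inspecting the high 64-bit half first and adding 64 when it is non-zero. The same split is sketched below over a plain hi/lo pair with a hand-rolled 64-bit width, so it builds without C++20 <bit> or absl::uint128:

#include <cstdint>
#include <cstdio>

struct U128 {  // illustrative stand-in for a 128-bit value
  uint64_t hi;
  uint64_t lo;
};

int BitWidth64(uint64_t v) {
  int w = 0;
  while (v != 0) {
    ++w;
    v >>= 1;
  }
  return w;
}

int BitWidth128(U128 v) {
  // Consult the high half first; only fall back to the low half when it is 0.
  return v.hi != 0 ? BitWidth64(v.hi) + 64 : BitWidth64(v.lo);
}

int main() {
  std::printf("%d\n", BitWidth128({0, 1}));                      // 1
  std::printf("%d\n", BitWidth128({0, 0xffffffffffffffffULL}));  // 64
  std::printf("%d\n", BitWidth128({1, 0}));                      // 65
  return 0;
}
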
diff --git a/abseil-cpp/absl/random/internal/uniform_helper.h b/abseil-cpp/absl/random/internal/uniform_helper.h
index 1243bc1..db737e1 100644
--- a/abseil-cpp/absl/random/internal/uniform_helper.h
+++ b/abseil-cpp/absl/random/internal/uniform_helper.h
@@ -100,7 +100,7 @@
 template <typename IntType, typename Tag>
 typename absl::enable_if_t<
     absl::conjunction<
-        std::is_integral<IntType>,
+        IsIntegral<IntType>,
         absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>,
                           std::is_same<Tag, IntervalOpenOpenTag>>>::value,
     IntType>
@@ -131,7 +131,7 @@
 template <typename IntType, typename Tag>
 typename absl::enable_if_t<
     absl::conjunction<
-        std::is_integral<IntType>,
+        IsIntegral<IntType>,
         absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>,
                           std::is_same<Tag, IntervalOpenOpenTag>>>::value,
     IntType>
@@ -153,7 +153,7 @@
 template <typename IntType, typename Tag>
 typename absl::enable_if_t<
     absl::conjunction<
-        std::is_integral<IntType>,
+        IsIntegral<IntType>,
         absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>,
                           std::is_same<Tag, IntervalOpenClosedTag>>>::value,
     IntType>
@@ -201,7 +201,7 @@
 }
 
 template <typename IntType>
-absl::enable_if_t<std::is_integral<IntType>::value, bool>
+absl::enable_if_t<IsIntegral<IntType>::value, bool>
 is_uniform_range_valid(IntType a, IntType b) {
   return a <= b;
 }
@@ -210,14 +210,14 @@
 // or absl::uniform_real_distribution depending on the NumType parameter.
 template <typename NumType>
 using UniformDistribution =
-    typename std::conditional<std::is_integral<NumType>::value,
+    typename std::conditional<IsIntegral<NumType>::value,
                               absl::uniform_int_distribution<NumType>,
                               absl::uniform_real_distribution<NumType>>::type;
 
 // UniformDistributionWrapper is used as the underlying distribution type
 // by the absl::Uniform template function. It selects the proper Abseil
 // uniform distribution and provides constructor overloads that match the
-// expected parameter order as well as adjusting distribtuion bounds based
+// expected parameter order as well as adjusting distribution bounds based
 // on the tag.
 template <typename NumType>
 struct UniformDistributionWrapper : public UniformDistribution<NumType> {
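
Swapping std::is_integral for the new IsIntegral trait matters because std::is_integral is false for class-type wide integers such as absl::uint128, so the SFINAE guards above would otherwise drop those overloads. A minimal sketch of the trait idea, using a stand-in Wide128 type rather than absl::uint128:

#include <cstdio>
#include <type_traits>

struct Wide128 {  // stand-in for a class-type 128-bit integer
  unsigned long long hi, lo;
};

template <typename T>
struct IsIntegral : std::is_integral<T> {};
template <>
struct IsIntegral<Wide128> : std::true_type {};

// An overload guarded by the custom trait accepts Wide128; one guarded by
// std::is_integral would not.
template <typename T>
typename std::enable_if<IsIntegral<T>::value, const char*>::type Describe(T) {
  return "treated as integral";
}

int main() {
  std::printf("std::is_integral<Wide128>: %d\n",
              static_cast<int>(std::is_integral<Wide128>::value));  // 0
  std::printf("IsIntegral<Wide128>:       %d\n",
              static_cast<int>(IsIntegral<Wide128>::value));        // 1
  std::printf("%s\n", Describe(Wide128{0, 7}));
  return 0;
}
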
diff --git a/abseil-cpp/absl/random/internal/wide_multiply.h b/abseil-cpp/absl/random/internal/wide_multiply.h
index 0afcbe0..891e363 100644
--- a/abseil-cpp/absl/random/internal/wide_multiply.h
+++ b/abseil-cpp/absl/random/internal/wide_multiply.h
@@ -26,7 +26,7 @@
 #endif
 
 #include "absl/base/config.h"
-#include "absl/base/internal/bits.h"
+#include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
 #include "absl/random/internal/traits.h"
 
@@ -34,43 +34,6 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-// Helper object to multiply two 64-bit values to a 128-bit value.
-// MultiplyU64ToU128 multiplies two 64-bit values to a 128-bit value.
-// If an intrinsic is available, it is used, otherwise use native 32-bit
-// multiplies to construct the result.
-inline absl::uint128 MultiplyU64ToU128(uint64_t a, uint64_t b) {
-#if defined(ABSL_HAVE_INTRINSIC_INT128)
-  return absl::uint128(static_cast<__uint128_t>(a) * b);
-#elif defined(ABSL_INTERNAL_USE_UMUL128)
-  // uint64_t * uint64_t => uint128 multiply using imul intrinsic on MSVC.
-  uint64_t high = 0;
-  const uint64_t low = _umul128(a, b, &high);
-  return absl::MakeUint128(high, low);
-#else
-  // uint128(a) * uint128(b) in emulated mode computes a full 128-bit x 128-bit
-  // multiply.  However there are many cases where that is not necessary, and it
-  // is only necessary to support a 64-bit x 64-bit = 128-bit multiply.  This is
-  // for those cases.
-  const uint64_t a00 = static_cast<uint32_t>(a);
-  const uint64_t a32 = a >> 32;
-  const uint64_t b00 = static_cast<uint32_t>(b);
-  const uint64_t b32 = b >> 32;
-
-  const uint64_t c00 = a00 * b00;
-  const uint64_t c32a = a00 * b32;
-  const uint64_t c32b = a32 * b00;
-  const uint64_t c64 = a32 * b32;
-
-  const uint32_t carry =
-      static_cast<uint32_t>(((c00 >> 32) + static_cast<uint32_t>(c32a) +
-                             static_cast<uint32_t>(c32b)) >>
-                            32);
-
-  return absl::MakeUint128(c64 + (c32a >> 32) + (c32b >> 32) + carry,
-                           c00 + (c32a << 32) + (c32b << 32));
-#endif
-}
-
 // wide_multiply<T> multiplies two N-bit values to a 2N-bit result.
 template <typename UIntType>
 struct wide_multiply {
@@ -82,27 +45,49 @@
     return static_cast<result_type>(a) * b;
   }
 
-  static input_type hi(result_type r) { return r >> kN; }
-  static input_type lo(result_type r) { return r; }
+  static input_type hi(result_type r) {
+    return static_cast<input_type>(r >> kN);
+  }
+  static input_type lo(result_type r) { return static_cast<input_type>(r); }
 
   static_assert(std::is_unsigned<UIntType>::value,
                 "Class-template wide_multiply<> argument must be unsigned.");
 };
 
-#ifndef ABSL_HAVE_INTRINSIC_INT128
-template <>
-struct wide_multiply<uint64_t> {
-  using input_type = uint64_t;
-  using result_type = absl::uint128;
+// MultiplyU128ToU256 multiplies two 128-bit values to a 256-bit value.
+inline U256 MultiplyU128ToU256(uint128 a, uint128 b) {
+  const uint128 a00 = static_cast<uint64_t>(a);
+  const uint128 a64 = a >> 64;
+  const uint128 b00 = static_cast<uint64_t>(b);
+  const uint128 b64 = b >> 64;
 
-  static result_type multiply(uint64_t a, uint64_t b) {
-    return MultiplyU64ToU128(a, b);
+  const uint128 c00 = a00 * b00;
+  const uint128 c64a = a00 * b64;
+  const uint128 c64b = a64 * b00;
+  const uint128 c128 = a64 * b64;
+
+  const uint64_t carry =
+      static_cast<uint64_t>(((c00 >> 64) + static_cast<uint64_t>(c64a) +
+                             static_cast<uint64_t>(c64b)) >>
+                            64);
+
+  return {c128 + (c64a >> 64) + (c64b >> 64) + carry,
+          c00 + (c64a << 64) + (c64b << 64)};
+}
+
+
+template <>
+struct wide_multiply<uint128> {
+  using input_type = uint128;
+  using result_type = U256;
+
+  static result_type multiply(input_type a, input_type b) {
+    return MultiplyU128ToU256(a, b);
   }
 
-  static uint64_t hi(result_type r) { return absl::Uint128High64(r); }
-  static uint64_t lo(result_type r) { return absl::Uint128Low64(r); }
+  static input_type hi(result_type r) { return r.hi; }
+  static input_type lo(result_type r) { return r.lo; }
 };
-#endif
 
 }  // namespace random_internal
 ABSL_NAMESPACE_END
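
MultiplyU128ToU256() above uses the same schoolbook split-and-carry scheme the removed MultiplyU64ToU128() fallback used, one size up: split each operand into halves, form the four partial products, and fold the carry out of the low half into the high half. The sketch below shows the scheme one size down (32x32 -> 64) so the result can be checked against a native 64-bit multiply:

#include <cstdint>
#include <cstdio>

uint64_t MulU32ToU64(uint32_t a, uint32_t b) {
  const uint32_t a00 = a & 0xffff, a16 = a >> 16;
  const uint32_t b00 = b & 0xffff, b16 = b >> 16;

  const uint32_t c00 = a00 * b00;   // low x low
  const uint32_t c16a = a00 * b16;  // cross products
  const uint32_t c16b = a16 * b00;
  const uint32_t c32 = a16 * b16;   // high x high

  // Carry out of the low 32 bits of c00 + (c16a << 16) + (c16b << 16).
  const uint32_t carry =
      ((c00 >> 16) + (c16a & 0xffff) + (c16b & 0xffff)) >> 16;

  const uint32_t lo = c00 + (c16a << 16) + (c16b << 16);
  const uint32_t hi = c32 + (c16a >> 16) + (c16b >> 16) + carry;
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

int main() {
  const uint32_t a = 0xffffeeee, b = 0xbbbbaaaa;
  std::printf("split:  %016llx\n",
              static_cast<unsigned long long>(MulU32ToU64(a, b)));
  std::printf("native: %016llx\n",
              static_cast<unsigned long long>(static_cast<uint64_t>(a) * b));
  return 0;
}
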
diff --git a/abseil-cpp/absl/random/internal/wide_multiply_test.cc b/abseil-cpp/absl/random/internal/wide_multiply_test.cc
index ca8ce92..f8ee35c 100644
--- a/abseil-cpp/absl/random/internal/wide_multiply_test.cc
+++ b/abseil-cpp/absl/random/internal/wide_multiply_test.cc
@@ -14,53 +14,106 @@
 
 #include "absl/random/internal/wide_multiply.h"
 
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/bits.h"
 #include "absl/numeric/int128.h"
 
-using absl::random_internal::MultiplyU64ToU128;
+using absl::random_internal::MultiplyU128ToU256;
+using absl::random_internal::U256;
 
 namespace {
 
-TEST(WideMultiplyTest, MultiplyU64ToU128Test) {
-  constexpr uint64_t k1 = 1;
-  constexpr uint64_t kMax = ~static_cast<uint64_t>(0);
+U256 LeftShift(U256 v, int s) {
+  if (s == 0) {
+    return v;
+  } else if (s < 128) {
+    return {(v.hi << s) | (v.lo >> (128 - s)), v.lo << s};
+  } else {
+    return {v.lo << (s - 128), 0};
+  }
+}
 
-  EXPECT_EQ(absl::uint128(0), MultiplyU64ToU128(0, 0));
+MATCHER_P2(Eq256, hi, lo, "") { return arg.hi == hi && arg.lo == lo; }
+MATCHER_P(Eq256, v, "") { return arg.hi == v.hi && arg.lo == v.lo; }
 
-  // Max uint64_t
-  EXPECT_EQ(MultiplyU64ToU128(kMax, kMax),
-            absl::MakeUint128(0xfffffffffffffffe, 0x0000000000000001));
-  EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(kMax, 1));
-  EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(1, kMax));
+TEST(WideMultiplyTest, MultiplyU128ToU256Test) {
+  using absl::uint128;
+  constexpr uint128 k1 = 1;
+  constexpr uint128 kMax = ~static_cast<uint128>(0);
+
+  EXPECT_THAT(MultiplyU128ToU256(0, 0), Eq256(0, 0));
+
+  // Max uint128_t
+  EXPECT_THAT(MultiplyU128ToU256(kMax, kMax), Eq256(kMax << 1, 1));
+  EXPECT_THAT(MultiplyU128ToU256(kMax, 1), Eq256(0, kMax));
+  EXPECT_THAT(MultiplyU128ToU256(1, kMax), Eq256(0, kMax));
   for (int i = 0; i < 64; ++i) {
-    EXPECT_EQ(absl::MakeUint128(0, kMax) << i,
-              MultiplyU64ToU128(kMax, k1 << i));
-    EXPECT_EQ(absl::MakeUint128(0, kMax) << i,
-              MultiplyU64ToU128(k1 << i, kMax));
+    SCOPED_TRACE(i);
+    EXPECT_THAT(MultiplyU128ToU256(kMax, k1 << i),
+                Eq256(LeftShift({0, kMax}, i)));
+    EXPECT_THAT(MultiplyU128ToU256(k1 << i, kMax),
+                Eq256(LeftShift({0, kMax}, i)));
   }
 
   // 1-bit x 1-bit.
   for (int i = 0; i < 64; ++i) {
     for (int j = 0; j < 64; ++j) {
-      EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j),
-                MultiplyU64ToU128(k1 << i, k1 << j));
-      EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j),
-                MultiplyU64ToU128(k1 << i, k1 << j));
+      EXPECT_THAT(MultiplyU128ToU256(k1 << i, k1 << j),
+                  Eq256(LeftShift({0, 1}, i + j)));
     }
   }
 
   // Verified multiplies
-  EXPECT_EQ(MultiplyU64ToU128(0xffffeeeeddddcccc, 0xbbbbaaaa99998888),
-            absl::MakeUint128(0xbbbb9e2692c5dddc, 0xc28f7531048d2c60));
-  EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfedcba9876543210),
-            absl::MakeUint128(0x0121fa00ad77d742, 0x2236d88fe5618cf0));
-  EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfdb97531eca86420),
-            absl::MakeUint128(0x0120ae99d26725fc, 0xce197f0ecac319e0));
-  EXPECT_EQ(MultiplyU64ToU128(0x97a87f4f261ba3f2, 0xfedcba9876543210),
-            absl::MakeUint128(0x96fbf1a8ae78d0ba, 0x5a6dd4b71f278320));
-  EXPECT_EQ(MultiplyU64ToU128(0xfedcba9876543210, 0xfdb97531eca86420),
-            absl::MakeUint128(0xfc98c6981a413e22, 0x342d0bbf48948200));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0xc502da0d6ea99fe8, 0xfa3c9141a1f50912),
+                  absl::MakeUint128(0x96bcf1ac37f97bd6, 0x27e2cdeb5fb2299e)),
+              Eq256(absl::MakeUint128(0x740113d838f96a64, 0x22e8cfa4d71f89ea),
+                    absl::MakeUint128(0x19184a345c62e993, 0x237871b630337b1c)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0x6f29e670cee07230, 0xc3d8e6c3e4d86759),
+                  absl::MakeUint128(0x3227d29fa6386db1, 0x231682bb1e4b764f)),
+              Eq256(absl::MakeUint128(0x15c779d9d5d3b07c, 0xd7e6c827f0c81cbe),
+                    absl::MakeUint128(0xf88e3914f7fa287a, 0x15b79975137dea77)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0xafb77107215646e1, 0x3b844cb1ac5769e7),
+                  absl::MakeUint128(0x1ff7b2d888b62479, 0x92f758ae96fcba0b)),
+              Eq256(absl::MakeUint128(0x15f13b70181f6985, 0x2adb36bbabce7d02),
+                    absl::MakeUint128(0x6c470d72e13aad04, 0x63fba3f5841762ed)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0xd85d5558d67ac905, 0xf88c70654dae19b1),
+                  absl::MakeUint128(0x17252c6727db3738, 0x399ff658c511eedc)),
+              Eq256(absl::MakeUint128(0x138fcdaf8b0421ee, 0x1b465ddf2a0d03f6),
+                    absl::MakeUint128(0x8f573ba68296860f, 0xf327d2738741a21c)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0x46f0421a37ff6bee, 0xa61df89f09d140b1),
+                  absl::MakeUint128(0x3d712ec9f37ca2e1, 0x9658a2cba47ef4b1)),
+              Eq256(absl::MakeUint128(0x11069cc48ee7c95d, 0xd35fb1c7aa91c978),
+                    absl::MakeUint128(0xbe2f4a6de874b015, 0xd2f7ac1b76746e61)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0x730d27c72d58fa49, 0x3ebeda7498f8827c),
+                  absl::MakeUint128(0xa2c959eca9f503af, 0x189c687eb842bbd8)),
+              Eq256(absl::MakeUint128(0x4928d0ea356ba022, 0x1546d34a2963393),
+                    absl::MakeUint128(0x7481531e1e0a16d1, 0xdd8025015cf6aca0)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0x6ca41020f856d2f1, 0xb9b0838c04a7f4aa),
+                  absl::MakeUint128(0x9cf41d28a8396f54, 0x1d681695e377ffe6)),
+              Eq256(absl::MakeUint128(0x429b92934d9be6f1, 0xea182877157c1e7),
+                    absl::MakeUint128(0x7135c23f0a4a475, 0xc1adc366f4a126bc)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0x57472833797c332, 0x6c79272fdec4687a),
+                  absl::MakeUint128(0xb5f022ea3838e46b, 0x16face2f003e27a6)),
+              Eq256(absl::MakeUint128(0x3e072e0962b3400, 0x5d9fe8fdc3d0e1f4),
+                    absl::MakeUint128(0x7dc0df47cedafd62, 0xbe6501f1acd2551c)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0xf0fb4198322eb1c2, 0xfe7f5f31f3885938),
+                  absl::MakeUint128(0xd99012b71bb7aa31, 0xac7a6f9eb190789)),
+              Eq256(absl::MakeUint128(0xcccc998cf075ca01, 0x642d144322fb873a),
+                    absl::MakeUint128(0xc79dc12b69d91ed4, 0xa83459132ce046f8)));
+  EXPECT_THAT(MultiplyU128ToU256(
+                  absl::MakeUint128(0xb5c04120848cdb47, 0x8aa62a827bf52635),
+                  absl::MakeUint128(0x8d07a359be2f1380, 0x467bb90d59da0dea)),
+              Eq256(absl::MakeUint128(0x64205019d139a9ce, 0x99425c5fb6e7a977),
+                    absl::MakeUint128(0xd3e99628a9e5fca7, 0x9c7824cb7279d72)));
 }
 
 }  // namespace
diff --git a/abseil-cpp/absl/random/log_uniform_int_distribution.h b/abseil-cpp/absl/random/log_uniform_int_distribution.h
index 960816e..4afff8f 100644
--- a/abseil-cpp/absl/random/log_uniform_int_distribution.h
+++ b/abseil-cpp/absl/random/log_uniform_int_distribution.h
@@ -23,6 +23,7 @@
 #include <ostream>
 #include <type_traits>
 
+#include "absl/numeric/bits.h"
 #include "absl/random/internal/fastmath.h"
 #include "absl/random/internal/generate_real.h"
 #include "absl/random/internal/iostream_state_saver.h"
@@ -68,7 +69,7 @@
       if (base_ == 2) {
         // Determine where the first set bit is on range(), giving a log2(range)
         // value which can be used to construct bounds.
-        log_range_ = (std::min)(random_internal::LeadingSetBit(range()),
+        log_range_ = (std::min)(random_internal::BitWidth(range()),
                                 std::numeric_limits<unsigned_type>::digits);
       } else {
         // NOTE: Computing the logN(x) introduces error from 2 sources:
@@ -80,7 +81,7 @@
         //
         // Thus a result which should equal K may equal K +/- epsilon,
         // which can eliminate some values depending on where the bounds fall.
-        const double inv_log_base = 1.0 / std::log(base_);
+        const double inv_log_base = 1.0 / std::log(static_cast<double>(base_));
         const double log_range = std::log(static_cast<double>(range()) + 0.5);
         log_range_ = static_cast<int>(std::ceil(inv_log_base * log_range));
       }
@@ -110,7 +111,7 @@
     unsigned_type range_;  // max - min
     int log_range_;        // ceil(logN(range_))
 
-    static_assert(std::is_integral<IntType>::value,
+    static_assert(random_internal::IsIntegral<IntType>::value,
                   "Class-template absl::log_uniform_int_distribution<> must be "
                   "parameterized using an integral type.");
   };
@@ -136,7 +137,7 @@
   template <typename URBG>
   result_type operator()(URBG& g,  // NOLINT(runtime/references)
                          const param_type& p) {
-    return (p.min)() + Generate(g, p);
+    return static_cast<result_type>((p.min)() + Generate(g, p));
   }
 
   result_type(min)() const { return (param_.min)(); }
@@ -190,8 +191,8 @@
                 ? (std::numeric_limits<unsigned_type>::max)()
                 : (static_cast<unsigned_type>(1) << e) - 1;
   } else {
-    const double r = std::pow(p.base(), d);
-    const double s = (r * p.base()) - 1.0;
+    const double r = std::pow(static_cast<double>(p.base()), d);
+    const double s = (r * static_cast<double>(p.base())) - 1.0;
 
     base_e =
         (r > static_cast<double>((std::numeric_limits<unsigned_type>::max)()))
@@ -208,7 +209,8 @@
   const unsigned_type hi = (top_e >= p.range()) ? p.range() : top_e;
 
   // choose uniformly over [lo, hi]
-  return absl::uniform_int_distribution<result_type>(lo, hi)(g);
+  return absl::uniform_int_distribution<result_type>(
+      static_cast<result_type>(lo), static_cast<result_type>(hi))(g);
 }
 
 template <typename CharT, typename Traits, typename IntType>
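
For non-power-of-two bases the constructor above derives log_range_ as ceil(log(range + 0.5) / log(base)), with the casts added so no integer-to-double conversion warning fires. A small numeric sketch of that computation for an illustrative base-10 range of 999, which yields 3:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t base = 10;
  const uint64_t range = 999;  // e.g. (max)() - (min)() for a [0, 999] range
  const double inv_log_base = 1.0 / std::log(static_cast<double>(base));
  const double log_range = std::log(static_cast<double>(range) + 0.5);
  const int buckets = static_cast<int>(std::ceil(inv_log_base * log_range));
  std::printf("log_range_ would be %d\n", buckets);  // prints 3
  return 0;
}
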
diff --git a/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc b/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc
index 5e780d9..5df3eda 100644
--- a/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc
+++ b/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc
@@ -24,7 +24,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -42,7 +42,7 @@
 
 using IntTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t,  //
                                   uint8_t, uint16_t, uint32_t, uint64_t>;
-TYPED_TEST_CASE(LogUniformIntDistributionTypeTest, IntTypes);
+TYPED_TEST_SUITE(LogUniformIntDistributionTypeTest, IntTypes);
 
 TYPED_TEST(LogUniformIntDistributionTypeTest, SerializeTest) {
   using param_type =
@@ -108,8 +108,7 @@
       if (sample > sample_max) sample_max = sample;
       if (sample < sample_min) sample_min = sample;
     }
-    ABSL_INTERNAL_LOG(INFO,
-                      absl::StrCat("Range: ", +sample_min, ", ", +sample_max));
+    LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
   }
 }
 
@@ -182,16 +181,14 @@
   const double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
 
   if (chi_square > threshold) {
-    ABSL_INTERNAL_LOG(INFO, "values");
+    LOG(INFO) << "values";
     for (size_t i = 0; i < buckets.size(); i++) {
-      ABSL_INTERNAL_LOG(INFO, absl::StrCat(i, ": ", buckets[i]));
+      LOG(INFO) << i << ": " << buckets[i];
     }
-    ABSL_INTERNAL_LOG(INFO,
-                      absl::StrFormat("trials=%d\n"
-                                      "%s(data, %d) = %f (%f)\n"
-                                      "%s @ 0.98 = %f",
-                                      trials, kChiSquared, dof, chi_square, p,
-                                      kChiSquared, threshold));
+    LOG(INFO) << "trials=" << trials << "\n"
+              << kChiSquared << "(data, " << dof << ") = " << chi_square << " ("
+              << p << ")\n"
+              << kChiSquared << " @ 0.98 = " << threshold;
   }
   return p;
 }
diff --git a/abseil-cpp/absl/random/mocking_bit_gen.h b/abseil-cpp/absl/random/mocking_bit_gen.h
index 6d2f2c8..89fa5a4 100644
--- a/abseil-cpp/absl/random/mocking_bit_gen.h
+++ b/abseil-cpp/absl/random/mocking_bit_gen.h
@@ -87,7 +87,7 @@
 //
 //   ON_CALL(absl::MockUniform<int>(), Call(bitgen, testing::_, testing::_))
 //       .WillByDefault([] (int low, int high) {
-//           return (low + high) / 2;
+//           return low + (high - low) / 2;
 //       });
 //
 //   EXPECT_EQ(absl::Uniform<int>(gen, 0, 10), 5);
@@ -104,10 +104,7 @@
 class MockingBitGen {
  public:
   MockingBitGen() = default;
-
-  ~MockingBitGen() {
-    for (const auto& del : deleters_) del();
-  }
+  ~MockingBitGen() = default;
 
   // URBG interface
   using result_type = absl::BitGen::result_type;
@@ -117,14 +114,6 @@
   result_type operator()() { return gen_(); }
 
  private:
-  using match_impl_fn = void (*)(void* mock_fn, void* t_erased_arg_tuple,
-                                 void* t_erased_result);
-
-  struct MockData {
-    void* mock_fn = nullptr;
-    match_impl_fn match_impl = nullptr;
-  };
-
   // GetMockFnType returns the testing::MockFunction for a result and tuple.
   // This method only exists for type deduction and is otherwise unimplemented.
   template <typename ResultT, typename... Args>
@@ -136,17 +125,46 @@
   // NOTE: MockFnCaller is essentially equivalent to the lambda:
   // [fn](auto... args) { return fn->Call(std::move(args)...)}
   // however that fails to build on some supported platforms.
-  template <typename ResultT, typename MockFnType, typename Tuple>
+  template <typename MockFnType, typename ResultT, typename Tuple>
   struct MockFnCaller;
+
   // specialization for std::tuple.
-  template <typename ResultT, typename MockFnType, typename... Args>
-  struct MockFnCaller<ResultT, MockFnType, std::tuple<Args...>> {
+  template <typename MockFnType, typename ResultT, typename... Args>
+  struct MockFnCaller<MockFnType, ResultT, std::tuple<Args...>> {
     MockFnType* fn;
     inline ResultT operator()(Args... args) {
       return fn->Call(std::move(args)...);
     }
   };
 
+  // FunctionHolder owns a particular ::testing::MockFunction associated with
+  // a mocked type signature, and implements the type-erased Apply call, which
+  // applies type-erased arguments to the mock.
+  class FunctionHolder {
+   public:
+    virtual ~FunctionHolder() = default;
+
+    // Apply is a dispatch function which converts the
+    // generic type-erased parameters into a specific mock invocation call.
+    virtual void Apply(/*ArgTupleT*/ void* args_tuple,
+                       /*ResultT*/ void* result) = 0;
+  };
+
+  template <typename MockFnType, typename ResultT, typename ArgTupleT>
+  class FunctionHolderImpl final : public FunctionHolder {
+   public:
+    void Apply(void* args_tuple, void* result) override {
+      // Requires tuple_args to point to a ArgTupleT, which is a
+      // std::tuple<Args...> used to invoke the mock function. Requires result
+      // to point to a ResultT, which is the result of the call.
+      *static_cast<ResultT*>(result) =
+          absl::apply(MockFnCaller<MockFnType, ResultT, ArgTupleT>{&mock_fn_},
+                      *static_cast<ArgTupleT*>(args_tuple));
+    }
+
+    MockFnType mock_fn_;
+  };
+
   // MockingBitGen::RegisterMock
   //
   // RegisterMock<ResultT, ArgTupleT>(FastTypeIdType) is the main extension
@@ -157,37 +175,31 @@
   //
   // The returned MockFunction<...> type can be used to setup additional
   // distribution parameters of the expectation.
-  template <typename ResultT, typename ArgTupleT>
-  auto RegisterMock(base_internal::FastTypeIdType type)
+  template <typename ResultT, typename ArgTupleT, typename SelfT>
+  auto RegisterMock(SelfT&, base_internal::FastTypeIdType type)
       -> decltype(GetMockFnType(std::declval<ResultT>(),
                                 std::declval<ArgTupleT>()))& {
-    using MockFnType = decltype(
-        GetMockFnType(std::declval<ResultT>(), std::declval<ArgTupleT>()));
-    auto& mock = mocks_[type];
-    if (!mock.mock_fn) {
-      auto* mock_fn = new MockFnType;
-      mock.mock_fn = mock_fn;
-      mock.match_impl = &MatchImpl<ResultT, ArgTupleT>;
-      deleters_.emplace_back([mock_fn] { delete mock_fn; });
-    }
-    return *static_cast<MockFnType*>(mock.mock_fn);
-  }
+    using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
+                                              std::declval<ArgTupleT>()));
 
-  // MockingBitGen::MatchImpl<> is a dispatch function which converts the
-  // generic type-erased parameters into a specific mock invocation call.
-  // Requires tuple_args to point to a ArgTupleT, which is a std::tuple<Args...>
-  // used to invoke the mock function.
-  // Requires result to point to a ResultT, which is the result of the call.
-  template <typename ResultT, typename ArgTupleT>
-  static void MatchImpl(/*MockFnType<ResultT, Args...>*/ void* mock_fn,
-                        /*ArgTupleT*/ void* args_tuple,
-                        /*ResultT*/ void* result) {
-    using MockFnType = decltype(
-        GetMockFnType(std::declval<ResultT>(), std::declval<ArgTupleT>()));
-    *static_cast<ResultT*>(result) = absl::apply(
-        MockFnCaller<ResultT, MockFnType, ArgTupleT>{
-            static_cast<MockFnType*>(mock_fn)},
-        *static_cast<ArgTupleT*>(args_tuple));
+    using WrappedFnType = absl::conditional_t<
+        std::is_same<SelfT, ::testing::NiceMock<absl::MockingBitGen>>::value,
+        ::testing::NiceMock<MockFnType>,
+        absl::conditional_t<
+            std::is_same<SelfT,
+                         ::testing::NaggyMock<absl::MockingBitGen>>::value,
+            ::testing::NaggyMock<MockFnType>,
+            absl::conditional_t<
+                std::is_same<SelfT,
+                             ::testing::StrictMock<absl::MockingBitGen>>::value,
+                ::testing::StrictMock<MockFnType>, MockFnType>>>;
+
+    using ImplT = FunctionHolderImpl<WrappedFnType, ResultT, ArgTupleT>;
+    auto& mock = mocks_[type];
+    if (!mock) {
+      mock = absl::make_unique<ImplT>();
+    }
+    return static_cast<ImplT*>(mock.get())->mock_fn_;
   }
 
   // MockingBitGen::InvokeMock
@@ -206,13 +218,13 @@
     // Trigger a mock, if there exists one that matches `param`.
     auto it = mocks_.find(type);
     if (it == mocks_.end()) return false;
-    auto* mock_data = static_cast<MockData*>(&it->second);
-    mock_data->match_impl(mock_data->mock_fn, args_tuple, result);
+    it->second->Apply(args_tuple, result);
     return true;
   }
 
-  absl::flat_hash_map<base_internal::FastTypeIdType, MockData> mocks_;
-  std::vector<std::function<void()>> deleters_;
+  absl::flat_hash_map<base_internal::FastTypeIdType,
+                      std::unique_ptr<FunctionHolder>>
+      mocks_;
   absl::BitGen gen_;
 
   template <typename>
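
The FunctionHolder change above replaces the raw void*/deleter bookkeeping with a small type-erasure interface: a single virtual Apply() receives the argument tuple and the result slot as void*, and the templated holder casts them back to the concrete types before invoking the mock. A self-contained sketch of that pattern, using std::function in place of ::testing::MockFunction:

#include <cstdio>
#include <functional>
#include <memory>
#include <tuple>

class Holder {
 public:
  virtual ~Holder() = default;
  virtual void Apply(void* args_tuple, void* result) = 0;
};

template <typename ResultT, typename ArgTupleT>
class HolderImpl final : public Holder {
 public:
  explicit HolderImpl(std::function<ResultT(ArgTupleT)> fn)
      : fn_(std::move(fn)) {}

  void Apply(void* args_tuple, void* result) override {
    // Requires args_tuple to point at an ArgTupleT and result at a ResultT.
    *static_cast<ResultT*>(result) = fn_(*static_cast<ArgTupleT*>(args_tuple));
  }

 private:
  std::function<ResultT(ArgTupleT)> fn_;
};

int main() {
  using Args = std::tuple<int, int>;
  std::unique_ptr<Holder> holder(new HolderImpl<int, Args>(
      [](Args t) { return std::get<0>(t) + std::get<1>(t); }));

  Args args{40, 2};
  int result = 0;
  holder->Apply(&args, &result);  // type-erased dispatch
  std::printf("%d\n", result);    // prints 42
  return 0;
}
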
diff --git a/abseil-cpp/absl/random/mocking_bit_gen_test.cc b/abseil-cpp/absl/random/mocking_bit_gen_test.cc
index f0ffc9a..c713cea 100644
--- a/abseil-cpp/absl/random/mocking_bit_gen_test.cc
+++ b/abseil-cpp/absl/random/mocking_bit_gen_test.cc
@@ -15,6 +15,7 @@
 //
 #include "absl/random/mocking_bit_gen.h"
 
+#include <cmath>
 #include <numeric>
 #include <random>
 
@@ -26,6 +27,8 @@
 #include "absl/random/random.h"
 
 namespace {
+
+using ::testing::_;
 using ::testing::Ne;
 using ::testing::Return;
 
@@ -326,8 +329,9 @@
 
   absl::MockingBitGen gen;
   ON_CALL(absl::MockPoisson<int>(), Call(gen, _))
-      .WillByDefault(
-          [](double lambda) { return static_cast<int>(lambda * 10); });
+      .WillByDefault([](double lambda) {
+        return static_cast<int>(std::rint(lambda * 10));
+      });
   EXPECT_EQ(absl::Poisson<int>(gen, 1.7), 17);
   EXPECT_EQ(absl::Poisson<int>(gen, 0.03), 0);
 }
@@ -344,4 +348,47 @@
   EXPECT_EQ(absl::Poisson<int>(gen, 2.0), 4);
 }
 
+TEST(MockingBitGen, NiceMock) {
+  ::testing::NiceMock<absl::MockingBitGen> gen;
+  ON_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillByDefault(Return(145));
+
+  ON_CALL(absl::MockPoisson<int>(), Call(gen, _)).WillByDefault(Return(3));
+
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
+  EXPECT_EQ(absl::Uniform(gen, 10, 1000), 145);
+  EXPECT_EQ(absl::Uniform(gen, 100, 1000), 145);
+}
+
+TEST(MockingBitGen, NaggyMock) {
+  // This is difficult to test, as only the output matters, so just verify
+  // that ON_CALL can be installed. Anything else requires log inspection.
+  ::testing::NaggyMock<absl::MockingBitGen> gen;
+
+  ON_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillByDefault(Return(145));
+  ON_CALL(absl::MockPoisson<int>(), Call(gen, _)).WillByDefault(Return(3));
+
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
+}
+
+TEST(MockingBitGen, StrictMock_NotEnough) {
+  EXPECT_NONFATAL_FAILURE(
+      []() {
+        ::testing::StrictMock<absl::MockingBitGen> gen;
+        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, _, _))
+            .WillOnce(Return(145));
+      }(),
+      "unsatisfied and active");
+}
+
+TEST(MockingBitGen, StrictMock_TooMany) {
+  ::testing::StrictMock<absl::MockingBitGen> gen;
+
+  EXPECT_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillOnce(Return(145));
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
+
+  EXPECT_NONFATAL_FAILURE(
+      [&]() { EXPECT_EQ(absl::Uniform(gen, 10, 1000), 0); }(),
+      "over-saturated and active");
+}
+
 }  // namespace
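
The std::rint added to the Poisson ON_CALL above guards against the usual binary
floating-point pitfall: a product that is mathematically an integer may be stored
slightly below it, and static_cast<int> truncates. A tiny illustration with a value
where this bites (0.29 * 100 is an illustrative case, not the test's inputs):

#include <cmath>
#include <iostream>

int main() {
  const double x = 0.29 * 100;                          // stored as 28.999999999999996
  std::cout << static_cast<int>(x) << "\n";             // 28: truncation drops the fraction
  std::cout << static_cast<int>(std::rint(x)) << "\n";  // 29: round to nearest first
  return 0;
}
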
diff --git a/abseil-cpp/absl/random/poisson_distribution.h b/abseil-cpp/absl/random/poisson_distribution.h
index cb5f5d5..f457308 100644
--- a/abseil-cpp/absl/random/poisson_distribution.h
+++ b/abseil-cpp/absl/random/poisson_distribution.h
@@ -26,6 +26,7 @@
 #include "absl/random/internal/fastmath.h"
 #include "absl/random/internal/generate_real.h"
 #include "absl/random/internal/iostream_state_saver.h"
+#include "absl/random/internal/traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -80,7 +81,7 @@
     double log_k_;
     int split_;
 
-    static_assert(std::is_integral<IntType>::value,
+    static_assert(random_internal::IsIntegral<IntType>::value,
                   "Class-template absl::poisson_distribution<> must be "
                   "parameterized using an integral type.");
   };
@@ -133,7 +134,8 @@
 poisson_distribution<IntType>::param_type::param_type(double mean)
     : mean_(mean), split_(0) {
   assert(mean >= 0);
-  assert(mean <= (std::numeric_limits<result_type>::max)());
+  assert(mean <=
+         static_cast<double>((std::numeric_limits<result_type>::max)()));
   // As a defensive measure, avoid large values of the mean.  The rejection
   // algorithm used does not support very large values well.  It may be worth
   // changing algorithms to better deal with these cases.
@@ -222,8 +224,9 @@
     // clang-format on
     const double lhs = 2.0 * std::log(u) + p.log_k_ + s;
     if (lhs < rhs) {
-      return x > (max)() ? (max)()
-                         : static_cast<result_type>(x);  // f(x)/k >= u^2
+      return x > static_cast<double>((max)())
+                 ? (max)()
+                 : static_cast<result_type>(x);  // f(x)/k >= u^2
     }
   }
 }
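
Both casts added above make a previously implicit integer-to-double conversion
explicit. The conversion is lossy for wide integral result types: the maximum of a
64-bit type is not representable as a double and rounds up to 2^64, which is worth
keeping in mind when a double is compared against it. A small sketch:

#include <cstdint>
#include <iomanip>
#include <iostream>
#include <limits>

int main() {
  const uint64_t kMax = std::numeric_limits<uint64_t>::max();  // 2^64 - 1
  const double as_double = static_cast<double>(kMax);          // rounds up to 2^64
  std::cout << std::setprecision(20) << as_double << "\n";     // 18446744073709551616
  return 0;
}
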
diff --git a/abseil-cpp/absl/random/poisson_distribution_test.cc b/abseil-cpp/absl/random/poisson_distribution_test.cc
index 8baabd1..5475596 100644
--- a/abseil-cpp/absl/random/poisson_distribution_test.cc
+++ b/abseil-cpp/absl/random/poisson_distribution_test.cc
@@ -25,9 +25,9 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/flat_hash_map.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -73,7 +73,7 @@
 
 using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
                                   uint8_t, uint16_t, uint32_t, uint64_t>;
-TYPED_TEST_CASE(PoissonDistributionInterfaceTest, IntTypes);
+TYPED_TEST_SUITE(PoissonDistributionInterfaceTest, IntTypes);
 
 TYPED_TEST(PoissonDistributionInterfaceTest, SerializeTest) {
   using param_type = typename absl::poisson_distribution<TypeParam>::param_type;
@@ -134,8 +134,8 @@
       if (sample < sample_min) sample_min = sample;
     }
 
-    ABSL_INTERNAL_LOG(INFO, absl::StrCat("Range {", param.mean(), "}: ",
-                                         +sample_min, ", ", +sample_max));
+    LOG(INFO) << "Range {" << param.mean() << "}: " << sample_min << ", "
+              << sample_max;
 
     // Validate stream serialization.
     std::stringstream ss;
@@ -188,10 +188,9 @@
   }
 
   void LogCDF() {
-    ABSL_INTERNAL_LOG(INFO, absl::StrCat("CDF (mean = ", mean_, ")"));
+    LOG(INFO) << "CDF (mean = " << mean_ << ")";
     for (const auto c : cdf_) {
-      ABSL_INTERNAL_LOG(INFO,
-                        absl::StrCat(c.index, ": pmf=", c.pmf, " cdf=", c.cdf));
+      LOG(INFO) << c.index << ": pmf=" << c.pmf << " cdf=" << c.cdf;
     }
   }
 
@@ -286,16 +285,15 @@
   const bool pass = absl::random_internal::Near("z", z, 0.0, max_err);
 
   if (!pass) {
-    ABSL_INTERNAL_LOG(
-        INFO, absl::StrFormat("p=%f max_err=%f\n"
-                              " mean=%f vs. %f\n"
-                              " stddev=%f vs. %f\n"
-                              " skewness=%f vs. %f\n"
-                              " kurtosis=%f vs. %f\n"
-                              " z=%f",
-                              p, max_err, m.mean, mean(), std::sqrt(m.variance),
-                              stddev(), m.skewness, skew(), m.kurtosis,
-                              kurtosis(), z));
+    // clang-format off
+    LOG(INFO)
+        << "p=" << p << " max_err=" << max_err << "\n"
+           " mean=" << m.mean << " vs. " << mean() << "\n"
+           " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
+           " skewness=" << m.skewness << " vs. " << skew() << "\n"
+           " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
+           " z=" << z;
+    // clang-format on
   }
   return pass;
 }
@@ -439,17 +437,16 @@
   if (chi_square > threshold) {
     LogCDF();
 
-    ABSL_INTERNAL_LOG(INFO, absl::StrCat("VALUES  buckets=", counts.size(),
-                                         "  samples=", kSamples));
+    LOG(INFO) << "VALUES  buckets=" << counts.size()
+              << "  samples=" << kSamples;
     for (size_t i = 0; i < counts.size(); i++) {
-      ABSL_INTERNAL_LOG(
-          INFO, absl::StrCat(cutoffs_[i], ": ", counts[i], " vs. E=", e[i]));
+      LOG(INFO) << cutoffs_[i] << ": " << counts[i] << " vs. E=" << e[i];
     }
 
-    ABSL_INTERNAL_LOG(
-        INFO,
-        absl::StrCat(kChiSquared, "(data, dof=", dof, ") = ", chi_square, " (",
-                     p, ")\n", " vs.\n", kChiSquared, " @ 0.98 = ", threshold));
+    LOG(INFO) << kChiSquared << "(data, dof=" << dof << ") = " << chi_square
+              << " (" << p << ")\n"
+              << " vs.\n"
+              << kChiSquared << " @ 0.98 = " << threshold;
   }
   return p;
 }
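
The test updates above migrate from ABSL_INTERNAL_LOG plus absl::StrCat to the
public streaming LOG macro from absl/log. A minimal standalone sketch of the new
style (the logged values are illustrative):

#include "absl/log/initialize.h"
#include "absl/log/log.h"

int main() {
  // Before InitializeLog() is called, messages are still written to stderr;
  // calling it here just makes the sketch a well-formed standalone binary.
  absl::InitializeLog();
  const double mean = 2.0;
  const double stddev = 1.4;
  LOG(INFO) << "mean=" << mean << " stddev=" << stddev;
  return 0;
}
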
diff --git a/abseil-cpp/absl/random/random.h b/abseil-cpp/absl/random/random.h
index 71b6309..7672086 100644
--- a/abseil-cpp/absl/random/random.h
+++ b/abseil-cpp/absl/random/random.h
@@ -68,7 +68,7 @@
 //
 // `absl::BitGen` may be constructed with an optional seed sequence type,
 // conforming to [rand.req.seed_seq], which will be mixed with additional
-// non-deterministic data.
+// non-deterministic data as detailed below.
 //
 // Example:
 //
@@ -79,16 +79,16 @@
 //  // Generate an integer value in the closed interval [1,6]
 //  int die_roll2 = absl::uniform_int_distribution<int>(1, 6)(gen_with_seed);
 //
+// Constructing two `absl::BitGen`s with the same seed sequence in the same
+// process will produce the same sequence of variates, but need not do so across
+// multiple processes even if they're executing the same binary.
+//
 // `absl::BitGen` meets the requirements of the Uniform Random Bit Generator
 // (URBG) concept as per the C++17 standard [rand.req.urng] though differs
 // slightly with [rand.req.eng]. Like its standard library equivalents (e.g.
 // `std::mersenne_twister_engine`) `absl::BitGen` is not cryptographically
 // secure.
 //
-// Constructing two `absl::BitGen`s with the same seed sequence in the same
-// binary will produce the same sequence of variates within the same binary, but
-// need not do so across multiple binary invocations.
-//
 // This type has been optimized to perform better than Mersenne Twister
 // (https://en.wikipedia.org/wiki/Mersenne_Twister) and many other complex URBG
 // types on modern x86, ARM, and PPC architectures.
@@ -147,7 +147,7 @@
 //
 // `absl::InsecureBitGen` may be constructed with an optional seed sequence
 // type, conforming to [rand.req.seed_seq], which will be mixed with additional
-// non-deterministic data. (See std_seed_seq.h for more information.)
+// non-deterministic data, as detailed in the `absl::BitGen` comment.
 //
 // `absl::InsecureBitGen` meets the requirements of the Uniform Random Bit
 // Generator (URBG) concept as per the C++17 standard [rand.req.urng] though
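
A short usage sketch of the seeding contract restated above: within a single
process, two absl::BitGen instances constructed from the same seed sequence yield
the same variates, while no such guarantee holds across processes:

#include <cstdint>
#include <iostream>
#include <random>

#include "absl/random/random.h"

int main() {
  std::seed_seq seq{1, 2, 3};
  absl::BitGen g1(seq);
  absl::BitGen g2(seq);
  // Same process, same seed sequence: the two streams match.
  std::cout << (absl::Uniform<uint64_t>(g1) == absl::Uniform<uint64_t>(g2)) << "\n";  // 1
  return 0;
}
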
diff --git a/abseil-cpp/absl/random/seed_sequences.h b/abseil-cpp/absl/random/seed_sequences.h
index ff1340c..c3af4b0 100644
--- a/abseil-cpp/absl/random/seed_sequences.h
+++ b/abseil-cpp/absl/random/seed_sequences.h
@@ -28,6 +28,7 @@
 #include <iterator>
 #include <random>
 
+#include "absl/base/config.h"
 #include "absl/random/internal/salted_seed_seq.h"
 #include "absl/random/internal/seed_material.h"
 #include "absl/random/seed_gen_exception.h"
diff --git a/abseil-cpp/absl/random/uniform_int_distribution.h b/abseil-cpp/absl/random/uniform_int_distribution.h
index da66564..fae8025 100644
--- a/abseil-cpp/absl/random/uniform_int_distribution.h
+++ b/abseil-cpp/absl/random/uniform_int_distribution.h
@@ -97,7 +97,7 @@
     result_type lo_;
     unsigned_type range_;
 
-    static_assert(std::is_integral<result_type>::value,
+    static_assert(random_internal::IsIntegral<result_type>::value,
                   "Class-template absl::uniform_int_distribution<> must be "
                   "parameterized using an integral type.");
   };  // param_type
@@ -125,7 +125,7 @@
   template <typename URBG>
   result_type operator()(
       URBG& gen, const param_type& param) {  // NOLINT(runtime/references)
-    return param.a() + Generate(gen, param.range());
+    return static_cast<result_type>(param.a() + Generate(gen, param.range()));
   }
 
   result_type a() const { return param_.a(); }
@@ -196,7 +196,7 @@
 uniform_int_distribution<IntType>::Generate(
     URBG& g,  // NOLINT(runtime/references)
     typename random_internal::make_unsigned_bits<IntType>::type R) {
-    random_internal::FastUniformBits<unsigned_type> fast_bits;
+  random_internal::FastUniformBits<unsigned_type> fast_bits;
   unsigned_type bits = fast_bits(g);
   const unsigned_type Lim = R + 1;
   if ((R & Lim) == 0) {
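
The hunk above sits inside Generate(), whose fast path fires when (R & Lim) == 0,
i.e. when the inclusive range R spans a power-of-two number of values (including
the full word), so a plain mask is already unbiased. A standalone sketch of that
structure with a simpler rejection loop than the one Abseil actually uses (the
OpenBSD arc4random_uniform style), shown only to make the branch condition concrete:

#include <cstdint>
#include <iostream>
#include <random>

// Returns a uniform value in [0, R], R inclusive, from a 64-bit generator.
uint64_t BoundedRand(std::mt19937_64& g, uint64_t R) {
  const uint64_t Lim = R + 1;          // wraps to 0 when R covers the full word
  if ((R & Lim) == 0) return g() & R;  // power-of-two sized range: mask is unbiased
  const uint64_t threshold = (0 - Lim) % Lim;  // 2^64 mod Lim
  for (;;) {
    const uint64_t bits = g();
    if (bits >= threshold) return bits % Lim;  // accepted span is a multiple of Lim
  }
}

int main() {
  std::mt19937_64 g(42);
  std::cout << BoundedRand(g, 5) << " " << BoundedRand(g, 7) << "\n";  // in [0,5] and [0,7]
  return 0;
}
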
diff --git a/abseil-cpp/absl/random/uniform_int_distribution_test.cc b/abseil-cpp/absl/random/uniform_int_distribution_test.cc
index 276d72a..b40d618 100644
--- a/abseil-cpp/absl/random/uniform_int_distribution_test.cc
+++ b/abseil-cpp/absl/random/uniform_int_distribution_test.cc
@@ -19,11 +19,12 @@
 #include <iterator>
 #include <random>
 #include <sstream>
+#include <string>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -106,8 +107,7 @@
         sample_min = sample;
       }
     }
-    std::string msg = absl::StrCat("Range: ", +sample_min, ", ", +sample_max);
-    ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+    LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
   }
 }
 
@@ -136,7 +136,7 @@
       typename absl::uniform_int_distribution<TypeParam>::param_type;
 
   // We use a fixed bit generator for distribution accuracy tests.  This allows
-  // these tests to be deterministic, while still testing the qualify of the
+  // these tests to be deterministic, while still testing the quality of the
   // implementation.
   absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6};
 
@@ -172,7 +172,7 @@
   using absl::random_internal::kChiSquared;
 
   constexpr size_t kTrials = 1000;
-  constexpr int kBuckets = 50;  // inclusive, so actally +1
+  constexpr int kBuckets = 50;  // inclusive, so actually +1
   constexpr double kExpected =
       static_cast<double>(kTrials) / static_cast<double>(kBuckets);
 
@@ -184,7 +184,7 @@
   const TypeParam max = min + kBuckets;
 
   // We use a fixed bit generator for distribution accuracy tests.  This allows
-  // these tests to be deterministic, while still testing the qualify of the
+  // these tests to be deterministic, while still testing the quality of the
   // implementation.
   absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6};
 
@@ -209,7 +209,7 @@
     absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
     absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
                     kThreshold);
-    ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+    LOG(INFO) << msg;
     FAIL() << msg;
   }
 }
diff --git a/abseil-cpp/absl/random/uniform_real_distribution.h b/abseil-cpp/absl/random/uniform_real_distribution.h
index 5ba17b2..1968334 100644
--- a/abseil-cpp/absl/random/uniform_real_distribution.h
+++ b/abseil-cpp/absl/random/uniform_real_distribution.h
@@ -73,12 +73,12 @@
         : lo_(lo), hi_(hi), range_(hi - lo) {
       // [rand.dist.uni.real] preconditions 2 & 3
       assert(lo <= hi);
+
       // NOTE: For integral types, we can promote the range to an unsigned type,
       // which gives full width of the range. However for real (fp) types, this
       // is not possible, so value generation cannot use the full range of the
       // real type.
       assert(range_ <= (std::numeric_limits<result_type>::max)());
-      assert(std::isfinite(range_));
     }
 
     result_type a() const { return lo_; }
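
The remaining range_ <= max() check already rejects an infinite (or NaN) range,
since both comparisons evaluate to false, which is presumably why the
std::isfinite assertion above could be dropped. The precondition matters for
floating-point types because hi - lo can overflow to infinity even when both
endpoints are finite. A tiny illustration:

#include <cmath>
#include <iostream>
#include <limits>

int main() {
  const double lo = std::numeric_limits<double>::lowest();  // -max
  const double hi = std::numeric_limits<double>::max();
  const double range = hi - lo;              // 2 * max overflows
  std::cout << std::isinf(range) << "\n";    // 1
  std::cout << (range <= hi) << "\n";        // 0: the remaining assert would fire
  return 0;
}
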
diff --git a/abseil-cpp/absl/random/uniform_real_distribution_test.cc b/abseil-cpp/absl/random/uniform_real_distribution_test.cc
index be107cd..260aac9 100644
--- a/abseil-cpp/absl/random/uniform_real_distribution_test.cc
+++ b/abseil-cpp/absl/random/uniform_real_distribution_test.cc
@@ -14,17 +14,20 @@
 
 #include "absl/random/uniform_real_distribution.h"
 
+#include <cfloat>
 #include <cmath>
 #include <cstdint>
 #include <iterator>
 #include <random>
 #include <sstream>
 #include <string>
+#include <type_traits>
 #include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
+#include "absl/numeric/internal/representation.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/internal/pcg_engine.h"
@@ -55,70 +58,94 @@
 template <typename RealType>
 class UniformRealDistributionTest : public ::testing::Test {};
 
-#if defined(__EMSCRIPTEN__)
-using RealTypes = ::testing::Types<float, double>;
-#else
-using RealTypes = ::testing::Types<float, double, long double>;
-#endif  // defined(__EMSCRIPTEN__)
+// double-double arithmetic is not supported well by either GCC or Clang; see
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
+// https://bugs.llvm.org/show_bug.cgi?id=49131, and
+// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests
+// with double doubles until compiler support is better.
+using RealTypes =
+    std::conditional<absl::numeric_internal::IsDoubleDouble(),
+                     ::testing::Types<float, double>,
+                     ::testing::Types<float, double, long double>>::type;
 
 TYPED_TEST_SUITE(UniformRealDistributionTest, RealTypes);
 
 TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
-  using param_type =
-      typename absl::uniform_real_distribution<TypeParam>::param_type;
+#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
+  // We're using an x87-compatible FPU, and intermediate operations are
+  // performed with 80-bit floats. This produces slightly different results from
+  // what we expect below.
+  GTEST_SKIP()
+      << "Skipping the test because we detected x87 floating-point semantics";
+#endif
+  using DistributionType = absl::uniform_real_distribution<TypeParam>;
+  using real_type = TypeParam;
+  using param_type = typename DistributionType::param_type;
 
-  constexpr const TypeParam a{1152921504606846976};
+  constexpr const real_type kMax = std::numeric_limits<real_type>::max();
+  constexpr const real_type kMin = std::numeric_limits<real_type>::min();
+  constexpr const real_type kEpsilon =
+      std::numeric_limits<real_type>::epsilon();
+  constexpr const real_type kLowest =
+      std::numeric_limits<real_type>::lowest();  // -max
+
+  const real_type kDenormMax = std::nextafter(kMin, real_type{0});
+  const real_type kOneMinusE =
+      std::nextafter(real_type{1}, real_type{0});  // 1 - epsilon
+
+  constexpr const real_type kTwo60{1152921504606846976};  // 2^60
 
   constexpr int kCount = 1000;
   absl::InsecureBitGen gen;
   for (const auto& param : {
            param_type(),
-           param_type(TypeParam(2.0), TypeParam(2.0)),  // Same
-           param_type(TypeParam(-0.1), TypeParam(0.1)),
-           param_type(TypeParam(0.05), TypeParam(0.12)),
-           param_type(TypeParam(-0.05), TypeParam(0.13)),
-           param_type(TypeParam(-0.05), TypeParam(-0.02)),
+           param_type(real_type{0}, real_type{1}),
+           param_type(real_type(-0.1), real_type(0.1)),
+           param_type(real_type(0.05), real_type(0.12)),
+           param_type(real_type(-0.05), real_type(0.13)),
+           param_type(real_type(-0.05), real_type(-0.02)),
+           // range = 0
+           param_type(real_type(2.0), real_type(2.0)),  // Same
            // double range = 0
            // 2^60 , 2^60 + 2^6
-           param_type(a, TypeParam(1152921504606847040)),
+           param_type(kTwo60, real_type(1152921504606847040)),
            // 2^60 , 2^60 + 2^7
-           param_type(a, TypeParam(1152921504606847104)),
+           param_type(kTwo60, real_type(1152921504606847104)),
            // double range = 2^8
            // 2^60 , 2^60 + 2^8
-           param_type(a, TypeParam(1152921504606847232)),
+           param_type(kTwo60, real_type(1152921504606847232)),
            // float range = 0
            // 2^60 , 2^60 + 2^36
-           param_type(a, TypeParam(1152921573326323712)),
+           param_type(kTwo60, real_type(1152921573326323712)),
            // 2^60 , 2^60 + 2^37
-           param_type(a, TypeParam(1152921642045800448)),
+           param_type(kTwo60, real_type(1152921642045800448)),
            // float range = 2^38
            // 2^60 , 2^60 + 2^38
-           param_type(a, TypeParam(1152921779484753920)),
+           param_type(kTwo60, real_type(1152921779484753920)),
            // Limits
-           param_type(0, std::numeric_limits<TypeParam>::max()),
-           param_type(std::numeric_limits<TypeParam>::lowest(), 0),
-           param_type(0, std::numeric_limits<TypeParam>::epsilon()),
-           param_type(-std::numeric_limits<TypeParam>::epsilon(),
-                      std::numeric_limits<TypeParam>::epsilon()),
-           param_type(std::numeric_limits<TypeParam>::epsilon(),
-                      2 * std::numeric_limits<TypeParam>::epsilon()),
+           param_type(0, kMax),
+           param_type(kLowest, 0),
+           param_type(0, kMin),
+           param_type(0, kEpsilon),
+           param_type(-kEpsilon, kEpsilon),
+           param_type(0, kOneMinusE),
+           param_type(0, kDenormMax),
        }) {
     // Validate parameters.
     const auto a = param.a();
     const auto b = param.b();
-    absl::uniform_real_distribution<TypeParam> before(a, b);
+    DistributionType before(a, b);
     EXPECT_EQ(before.a(), param.a());
     EXPECT_EQ(before.b(), param.b());
 
     {
-      absl::uniform_real_distribution<TypeParam> via_param(param);
+      DistributionType via_param(param);
       EXPECT_EQ(via_param, before);
     }
 
     std::stringstream ss;
     ss << before;
-    absl::uniform_real_distribution<TypeParam> after(TypeParam(1.0),
-                                                     TypeParam(3.1));
+    DistributionType after(real_type(1.0), real_type(3.1));
 
     EXPECT_NE(before.a(), after.a());
     EXPECT_NE(before.b(), after.b());
@@ -153,11 +180,10 @@
       }
     }
 
-    if (!std::is_same<TypeParam, long double>::value) {
+    if (!std::is_same<real_type, long double>::value) {
       // static_cast<double>(long double) can overflow.
-      std::string msg = absl::StrCat("Range: ", static_cast<double>(sample_min),
-                                     ", ", static_cast<double>(sample_max));
-      ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+      LOG(INFO) << "Range: " << static_cast<double>(sample_min) << ", "
+                << static_cast<double>(sample_max);
     }
   }
 }
@@ -167,33 +193,52 @@
 #pragma warning(disable:4756)  // Constant arithmetic overflow.
 #endif
 TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) {
+  using DistributionType = absl::uniform_real_distribution<TypeParam>;
+  using real_type = TypeParam;
+
 #if GTEST_HAS_DEATH_TEST
   // Hi < Lo
-  EXPECT_DEBUG_DEATH(
-      { absl::uniform_real_distribution<TypeParam> dist(10.0, 1.0); }, "");
+  EXPECT_DEBUG_DEATH({ DistributionType dist(10.0, 1.0); }, "");
 
   // Hi - Lo > numeric_limits<>::max()
   EXPECT_DEBUG_DEATH(
       {
-        absl::uniform_real_distribution<TypeParam> dist(
-            std::numeric_limits<TypeParam>::lowest(),
-            std::numeric_limits<TypeParam>::max());
+        DistributionType dist(std::numeric_limits<real_type>::lowest(),
+                              std::numeric_limits<real_type>::max());
       },
       "");
+
+  // kEpsilon guarantees that max + kEpsilon = inf.
+  const auto kEpsilon = std::nexttoward(
+      (std::numeric_limits<real_type>::max() -
+       std::nexttoward(std::numeric_limits<real_type>::max(), 0.0)) /
+          2,
+      std::numeric_limits<real_type>::max());
+  EXPECT_DEBUG_DEATH(
+      {
+        DistributionType dist(-kEpsilon, std::numeric_limits<real_type>::max());
+      },
+      "");
+  EXPECT_DEBUG_DEATH(
+      {
+        DistributionType dist(std::numeric_limits<real_type>::lowest(),
+                              kEpsilon);
+      },
+      "");
+
 #endif  // GTEST_HAS_DEATH_TEST
 #if defined(NDEBUG)
   // opt-mode, for invalid parameters, will generate a garbage value,
   // but should not enter an infinite loop.
   absl::InsecureBitGen gen;
   {
-    absl::uniform_real_distribution<TypeParam> dist(10.0, 1.0);
+    DistributionType dist(10.0, 1.0);
     auto x = dist(gen);
     EXPECT_FALSE(std::isnan(x)) << x;
   }
   {
-    absl::uniform_real_distribution<TypeParam> dist(
-        std::numeric_limits<TypeParam>::lowest(),
-        std::numeric_limits<TypeParam>::max());
+    DistributionType dist(std::numeric_limits<real_type>::lowest(),
+                          std::numeric_limits<real_type>::max());
     auto x = dist(gen);
     // Infinite result.
     EXPECT_FALSE(std::isfinite(x)) << x;
@@ -205,6 +250,8 @@
 #endif
 
 TYPED_TEST(UniformRealDistributionTest, TestMoments) {
+  using DistributionType = absl::uniform_real_distribution<TypeParam>;
+
   constexpr int kSize = 1000000;
   std::vector<double> values(kSize);
 
@@ -213,7 +260,7 @@
   // implementation.
   absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6};
 
-  absl::uniform_real_distribution<TypeParam> dist;
+  DistributionType dist;
   for (int i = 0; i < kSize; i++) {
     values[i] = dist(rng);
   }
@@ -227,9 +274,10 @@
 }
 
 TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
+  using DistributionType = absl::uniform_real_distribution<TypeParam>;
+  using param_type = typename DistributionType::param_type;
+
   using absl::random_internal::kChiSquared;
-  using param_type =
-      typename absl::uniform_real_distribution<TypeParam>::param_type;
 
   constexpr size_t kTrials = 100000;
   constexpr int kBuckets = 50;
@@ -254,7 +302,7 @@
     const double factor = kBuckets / (max_val - min_val);
 
     std::vector<int32_t> counts(kBuckets, 0);
-    absl::uniform_real_distribution<TypeParam> dist(param);
+    DistributionType dist(param);
     for (size_t i = 0; i < kTrials; i++) {
       auto x = dist(rng);
       auto bucket = static_cast<size_t>((x - min_val) * factor);
@@ -275,15 +323,18 @@
       absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
       absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
                       kThreshold);
-      ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+      LOG(INFO) << msg;
       FAIL() << msg;
     }
   }
 }
 
 TYPED_TEST(UniformRealDistributionTest, StabilityTest) {
+  using DistributionType = absl::uniform_real_distribution<TypeParam>;
+  using real_type = TypeParam;
+
   // absl::uniform_real_distribution stability relies only on
-  // random_internal::RandU64ToDouble and random_internal::RandU64ToFloat.
+  // random_internal::GenerateRealFromBits.
   absl::random_internal::sequence_urbg urbg(
       {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
        0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
@@ -292,9 +343,9 @@
 
   std::vector<int> output(12);
 
-  absl::uniform_real_distribution<TypeParam> dist;
+  DistributionType dist;
   std::generate(std::begin(output), std::end(output), [&] {
-    return static_cast<int>(TypeParam(1000000) * dist(urbg));
+    return static_cast<int>(real_type(1000000) * dist(urbg));
   });
 
   EXPECT_THAT(
diff --git a/abseil-cpp/absl/random/zipf_distribution.h b/abseil-cpp/absl/random/zipf_distribution.h
index 22ebc75..03497b1 100644
--- a/abseil-cpp/absl/random/zipf_distribution.h
+++ b/abseil-cpp/absl/random/zipf_distribution.h
@@ -23,13 +23,14 @@
 #include <type_traits>
 
 #include "absl/random/internal/iostream_state_saver.h"
+#include "absl/random/internal/traits.h"
 #include "absl/random/uniform_real_distribution.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
 // absl::zipf_distribution produces random integer-values in the range [0, k],
-// distributed according to the discrete probability function:
+// distributed according to the unnormalized discrete probability function:
 //
 //  P(x) = (v + x) ^ -q
 //
@@ -94,7 +95,7 @@
     double hxm_;              // h(k + 0.5)
     double hx0_minus_hxm_;    // h(x0) - h(k + 0.5)
 
-    static_assert(std::is_integral<IntType>::value,
+    static_assert(random_internal::IsIntegral<IntType>::value,
                   "Class-template absl::zipf_distribution<> must be "
                   "parameterized using an integral type.");
   };
@@ -221,7 +222,7 @@
     const double u = p.hxm_ + v * p.hx0_minus_hxm_;
     const double x = p.hinv(u);
     k = rint(x);              // std::floor(x + 0.5);
-    if (k > p.k()) continue;  // reject k > max_k
+    if (k > static_cast<double>(p.k())) continue;  // reject k > max_k
     if (k - x <= p.s_) break;
     const double h = p.h(k + 0.5);
     const double r = p.pow_negative_q(p.v_ + k);
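
A small numeric sketch of the unnormalized weight named in the reworded comment
above, P(x) proportional to (v + x)^-q, evaluated for illustrative parameter values
(the q and v below are arbitrary choices for the example, not statements about the
distribution's defaults):

#include <cmath>
#include <iostream>

int main() {
  const double q = 2.0;  // illustrative exponent
  const double v = 1.0;  // illustrative offset
  for (int x = 0; x <= 4; ++x) {
    // absl::zipf_distribution samples integers proportionally to this weight.
    std::cout << "x=" << x << "  weight=" << std::pow(v + x, -q) << "\n";
  }
  return 0;
}
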
diff --git a/abseil-cpp/absl/random/zipf_distribution_test.cc b/abseil-cpp/absl/random/zipf_distribution_test.cc
index f8cf70e..801ec4f 100644
--- a/abseil-cpp/absl/random/zipf_distribution_test.cc
+++ b/abseil-cpp/absl/random/zipf_distribution_test.cc
@@ -25,7 +25,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/internal/chi_square.h"
 #include "absl/random/internal/pcg_engine.h"
 #include "absl/random/internal/sequence_urbg.h"
@@ -44,7 +44,7 @@
 
 using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
                                   uint8_t, uint16_t, uint32_t, uint64_t>;
-TYPED_TEST_CASE(ZipfDistributionTypedTest, IntTypes);
+TYPED_TEST_SUITE(ZipfDistributionTypedTest, IntTypes);
 
 TYPED_TEST(ZipfDistributionTypedTest, SerializeTest) {
   using param_type = typename absl::zipf_distribution<TypeParam>::param_type;
@@ -102,8 +102,7 @@
       if (sample > sample_max) sample_max = sample;
       if (sample < sample_min) sample_min = sample;
     }
-    ABSL_INTERNAL_LOG(INFO,
-                      absl::StrCat("Range: ", +sample_min, ", ", +sample_max));
+    LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
   }
 }
 
@@ -303,18 +302,15 @@
 
   // Log if the chi_squared value is above the threshold.
   if (chi_square > threshold) {
-    ABSL_INTERNAL_LOG(INFO, "values");
+    LOG(INFO) << "values";
     for (size_t i = 0; i < expected.size(); i++) {
-      ABSL_INTERNAL_LOG(INFO, absl::StrCat(points[i], ": ", buckets[i],
-                                           " vs. E=", expected[i]));
+      LOG(INFO) << points[i] << ": " << buckets[i] << " vs. E=" << expected[i];
     }
-    ABSL_INTERNAL_LOG(INFO, absl::StrCat("trials ", trials));
-    ABSL_INTERNAL_LOG(INFO,
-                      absl::StrCat("mean ", avg, " vs. expected ", mean()));
-    ABSL_INTERNAL_LOG(INFO, absl::StrCat(kChiSquared, "(data, ", dof, ") = ",
-                                         chi_square, " (", p_actual, ")"));
-    ABSL_INTERNAL_LOG(INFO,
-                      absl::StrCat(kChiSquared, " @ 0.9995 = ", threshold));
+    LOG(INFO) << "trials " << trials;
+    LOG(INFO) << "mean " << avg << " vs. expected " << mean();
+    LOG(INFO) << kChiSquared << "(data, " << dof << ") = " << chi_square << " ("
+              << p_actual << ")";
+    LOG(INFO) << kChiSquared << " @ 0.9995 = " << threshold;
     FAIL() << kChiSquared << " value of " << chi_square
            << " is above the threshold.";
   }
diff --git a/abseil-cpp/absl/status/BUILD.bazel b/abseil-cpp/absl/status/BUILD.bazel
index 189bd73..1f58b30 100644
--- a/abseil-cpp/absl/status/BUILD.bazel
+++ b/abseil-cpp/absl/status/BUILD.bazel
@@ -17,10 +17,10 @@
 # It will expand later to have utilities around `Status` like `StatusOr`,
 # `StatusBuilder` and macros.
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
     "ABSL_TEST_COPTS",
 )
 
@@ -40,14 +40,16 @@
         "status_payload_printer.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:atomic_hook",
-        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
+        "//absl/base:strerror",
         "//absl/container:inlined_vector",
         "//absl/debugging:stacktrace",
         "//absl/debugging:symbolize",
+        "//absl/functional:function_ref",
         "//absl/strings",
         "//absl/strings:cord",
         "//absl/strings:str_format",
@@ -59,6 +61,7 @@
     name = "status_test",
     srcs = ["status_test.cc"],
     copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":status",
         "//absl/strings",
@@ -76,8 +79,10 @@
         "statusor.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":status",
+        "//absl/base",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/meta:type_traits",
@@ -96,6 +101,7 @@
         ":statusor",
         "//absl/base",
         "//absl/memory",
+        "//absl/strings",
         "//absl/types:any",
         "//absl/utility",
         "@com_google_googletest//:gtest_main",
diff --git a/abseil-cpp/absl/status/CMakeLists.txt b/abseil-cpp/absl/status/CMakeLists.txt
index f0d798a..4a3c5d6 100644
--- a/abseil-cpp/absl/status/CMakeLists.txt
+++ b/abseil-cpp/absl/status/CMakeLists.txt
@@ -25,18 +25,22 @@
     "status_payload_printer.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
+  DEFINES
+    "$<$<PLATFORM_ID:AIX>:_LINUX_SOURCE_COMPAT>"
   DEPS
     absl::atomic_hook
     absl::config
-    absl::core_headers
-    absl::raw_logging_internal
-    absl::inlined_vector
-    absl::stacktrace
-    absl::symbolize
-    absl::strings
     absl::cord
-    absl::str_format
+    absl::core_headers
+    absl::function_ref
+    absl::inlined_vector
     absl::optional
+    absl::raw_logging_internal
+    absl::stacktrace
+    absl::str_format
+    absl::strerror
+    absl::strings
+    absl::symbolize
   PUBLIC
 )
 
@@ -50,7 +54,7 @@
   DEPS
     absl::status
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -64,6 +68,7 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::base
     absl::status
     absl::core_headers
     absl::raw_logging_internal
@@ -84,5 +89,5 @@
   DEPS
     absl::status
     absl::statusor
-    gmock_main
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/status/internal/status_internal.h b/abseil-cpp/absl/status/internal/status_internal.h
index 1f82b8e..6198e72 100644
--- a/abseil-cpp/absl/status/internal/status_internal.h
+++ b/abseil-cpp/absl/status/internal/status_internal.h
@@ -14,11 +14,32 @@
 #ifndef ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
 #define ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
 
+#include <memory>
 #include <string>
+#include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/container/inlined_vector.h"
 #include "absl/strings/cord.h"
 
+#ifndef SWIG
+// Disabled for SWIG as it doesn't parse attributes correctly.
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
+// as part of a class definition (b/6995610), so we use a forward declaration.
+//
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] Status;
+#else
+class ABSL_MUST_USE_RESULT Status;
+#endif
+ABSL_NAMESPACE_END
+}  // namespace absl
+#endif  // !SWIG
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
@@ -36,13 +57,32 @@
 
 // Reference-counted representation of Status data.
 struct StatusRep {
+  StatusRep(absl::StatusCode code_arg, absl::string_view message_arg,
+            std::unique_ptr<status_internal::Payloads> payloads_arg)
+      : ref(int32_t{1}),
+        code(code_arg),
+        message(message_arg),
+        payloads(std::move(payloads_arg)) {}
+
   std::atomic<int32_t> ref;
   absl::StatusCode code;
+
+  // As an internal implementation detail, we guarantee that if status.message()
+  // is non-empty, then the resulting string_view is null terminated.
+  // This is required to implement 'StatusMessageAsCStr(...)'
   std::string message;
   std::unique_ptr<status_internal::Payloads> payloads;
 };
 
 absl::StatusCode MapToLocalCode(int value);
+
+// Returns a pointer to a newly-allocated string with the given `prefix`,
+// suitable for output as an error message in assertion/`CHECK()` failures.
+//
+// This is an internal implementation detail for Abseil logging.
+std::string* MakeCheckFailString(const absl::Status* status,
+                                 const char* prefix);
+
 }  // namespace status_internal
 
 ABSL_NAMESPACE_END
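
A minimal sketch of what the [[nodiscard]] / ABSL_MUST_USE_RESULT forward
declaration above buys: once the class itself carries the attribute, silently
dropping a returned value of that type becomes a compiler diagnostic. Result and
DoWork are hypothetical names, not Abseil APIs; C++17 is assumed for [[nodiscard]]
on a class type.

class [[nodiscard]] Result {};

Result DoWork() { return Result{}; }

int main() {
  DoWork();                // diagnostic: discarding a [[nodiscard]] value
  Result kept = DoWork();  // fine
  (void)kept;
  return 0;
}
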
diff --git a/abseil-cpp/absl/status/internal/statusor_internal.h b/abseil-cpp/absl/status/internal/statusor_internal.h
index 96e41da..49cead7 100644
--- a/abseil-cpp/absl/status/internal/statusor_internal.h
+++ b/abseil-cpp/absl/status/internal/statusor_internal.h
@@ -17,6 +17,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/meta/type_traits.h"
 #include "absl/status/status.h"
 #include "absl/utility/utility.h"
@@ -68,11 +69,8 @@
 template <typename T, typename U>
 struct IsDirectInitializationAmbiguous
     : public absl::conditional_t<
-          std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
-                       U>::value,
-          std::false_type,
-          IsDirectInitializationAmbiguous<
-              T, absl::remove_cv_t<absl::remove_reference_t<U>>>> {};
+          std::is_same<absl::remove_cvref_t<U>, U>::value, std::false_type,
+          IsDirectInitializationAmbiguous<T, absl::remove_cvref_t<U>>> {};
 
 template <typename T, typename V>
 struct IsDirectInitializationAmbiguous<T, absl::StatusOr<V>>
@@ -83,14 +81,11 @@
 template <typename T, typename U>
 using IsDirectInitializationValid = absl::disjunction<
     // Short circuits if T is basically U.
-    std::is_same<T, absl::remove_cv_t<absl::remove_reference_t<U>>>,
+    std::is_same<T, absl::remove_cvref_t<U>>,
     absl::negation<absl::disjunction<
-        std::is_same<absl::StatusOr<T>,
-                     absl::remove_cv_t<absl::remove_reference_t<U>>>,
-        std::is_same<absl::Status,
-                     absl::remove_cv_t<absl::remove_reference_t<U>>>,
-        std::is_same<absl::in_place_t,
-                     absl::remove_cv_t<absl::remove_reference_t<U>>>,
+        std::is_same<absl::StatusOr<T>, absl::remove_cvref_t<U>>,
+        std::is_same<absl::Status, absl::remove_cvref_t<U>>,
+        std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>,
         IsDirectInitializationAmbiguous<T, U>>>>;
 
 // This trait detects whether `StatusOr<T>::operator=(U&&)` is ambiguous, which
@@ -106,11 +101,8 @@
 template <typename T, typename U>
 struct IsForwardingAssignmentAmbiguous
     : public absl::conditional_t<
-          std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
-                       U>::value,
-          std::false_type,
-          IsForwardingAssignmentAmbiguous<
-              T, absl::remove_cv_t<absl::remove_reference_t<U>>>> {};
+          std::is_same<absl::remove_cvref_t<U>, U>::value, std::false_type,
+          IsForwardingAssignmentAmbiguous<T, absl::remove_cvref_t<U>>> {};
 
 template <typename T, typename U>
 struct IsForwardingAssignmentAmbiguous<T, absl::StatusOr<U>>
@@ -121,32 +113,25 @@
 template <typename T, typename U>
 using IsForwardingAssignmentValid = absl::disjunction<
     // Short circuits if T is basically U.
-    std::is_same<T, absl::remove_cv_t<absl::remove_reference_t<U>>>,
+    std::is_same<T, absl::remove_cvref_t<U>>,
     absl::negation<absl::disjunction<
-        std::is_same<absl::StatusOr<T>,
-                     absl::remove_cv_t<absl::remove_reference_t<U>>>,
-        std::is_same<absl::Status,
-                     absl::remove_cv_t<absl::remove_reference_t<U>>>,
-        std::is_same<absl::in_place_t,
-                     absl::remove_cv_t<absl::remove_reference_t<U>>>,
+        std::is_same<absl::StatusOr<T>, absl::remove_cvref_t<U>>,
+        std::is_same<absl::Status, absl::remove_cvref_t<U>>,
+        std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>,
         IsForwardingAssignmentAmbiguous<T, U>>>>;
 
 class Helper {
  public:
   // Move type-agnostic error handling to the .cc.
   static void HandleInvalidStatusCtorArg(Status*);
-  static void Crash(const absl::Status& status);
+  ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status);
 };
 
 // Construct an instance of T in `p` through placement new, passing Args... to
 // the constructor.
 // This abstraction is here mostly for the gcc performance fix.
 template <typename T, typename... Args>
-void PlacementNew(void* p, Args&&... args) {
-#if defined(__GNUC__) && !defined(__clang__)
-  // Teach gcc that 'p' cannot be null, fixing code size issues.
-  if (p == nullptr) __builtin_unreachable();
-#endif
+ABSL_ATTRIBUTE_NONNULL(1) void PlacementNew(void* p, Args&&... args) {
   new (p) T(std::forward<Args>(args)...);
 }
 
@@ -215,7 +200,7 @@
   template <typename U,
             absl::enable_if_t<std::is_constructible<absl::Status, U&&>::value,
                               int> = 0>
-  explicit StatusOrData(U&& v) : status_(v) {
+  explicit StatusOrData(U&& v) : status_(std::forward<U>(v)) {
     EnsureNotOk();
   }
 
diff --git a/abseil-cpp/absl/status/status.cc b/abseil-cpp/absl/status/status.cc
index a27fd8b..26e6829 100644
--- a/abseil-cpp/absl/status/status.cc
+++ b/abseil-cpp/absl/status/status.cc
@@ -13,9 +13,14 @@
 // limitations under the License.
 #include "absl/status/status.h"
 
+#include <errno.h>
+
 #include <cassert>
+#include <utility>
 
 #include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/strerror.h"
+#include "absl/base/macros.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/debugging/symbolize.h"
 #include "absl/status/status_payload_printer.h"
@@ -74,15 +79,15 @@
 
 namespace status_internal {
 
-static int FindPayloadIndexByUrl(const Payloads* payloads,
-                                 absl::string_view type_url) {
-  if (payloads == nullptr) return -1;
+static absl::optional<size_t> FindPayloadIndexByUrl(
+    const Payloads* payloads, absl::string_view type_url) {
+  if (payloads == nullptr) return absl::nullopt;
 
   for (size_t i = 0; i < payloads->size(); ++i) {
     if ((*payloads)[i].type_url == type_url) return i;
   }
 
-  return -1;
+  return absl::nullopt;
 }
 
 // Convert canonical code to a value known to this binary.
@@ -116,8 +121,9 @@
 absl::optional<absl::Cord> Status::GetPayload(
     absl::string_view type_url) const {
   const auto* payloads = GetPayloads();
-  int index = status_internal::FindPayloadIndexByUrl(payloads, type_url);
-  if (index != -1) return (*payloads)[index].payload;
+  absl::optional<size_t> index =
+      status_internal::FindPayloadIndexByUrl(payloads, type_url);
+  if (index.has_value()) return (*payloads)[index.value()].payload;
 
   return absl::nullopt;
 }
@@ -132,10 +138,10 @@
     rep->payloads = absl::make_unique<status_internal::Payloads>();
   }
 
-  int index =
+  absl::optional<size_t> index =
       status_internal::FindPayloadIndexByUrl(rep->payloads.get(), type_url);
-  if (index != -1) {
-    (*rep->payloads)[index].payload = std::move(payload);
+  if (index.has_value()) {
+    (*rep->payloads)[index.value()].payload = std::move(payload);
     return;
   }
 
@@ -143,10 +149,11 @@
 }
 
 bool Status::ErasePayload(absl::string_view type_url) {
-  int index = status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url);
-  if (index != -1) {
+  absl::optional<size_t> index =
+      status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url);
+  if (index.has_value()) {
     PrepareToModify();
-    GetPayloads()->erase(GetPayloads()->begin() + index);
+    GetPayloads()->erase(GetPayloads()->begin() + index.value());
     if (GetPayloads()->empty() && message().empty()) {
       // Special case: If this can be represented inlined, it MUST be
       // inlined (EqualsSlow depends on this behavior).
@@ -161,7 +168,7 @@
 }
 
 void Status::ForEachPayload(
-    const std::function<void(absl::string_view, const absl::Cord&)>& visitor)
+    absl::FunctionRef<void(absl::string_view, const absl::Cord&)> visitor)
     const {
   if (auto* payloads = GetPayloads()) {
     bool in_reverse =
@@ -185,11 +192,16 @@
 }
 
 const std::string* Status::EmptyString() {
-  static std::string* empty_string = new std::string();
-  return empty_string;
+  static union EmptyString {
+    std::string str;
+    ~EmptyString() {}
+  } empty = {{}};
+  return &empty.str;
 }
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr const char Status::kMovedFromString[];
+#endif
 
 const std::string* Status::MovedFromString() {
   static std::string* moved_from_string = new std::string(kMovedFromString);
@@ -207,20 +219,10 @@
   }
 }
 
-uintptr_t Status::NewRep(absl::StatusCode code, absl::string_view msg,
-                         std::unique_ptr<status_internal::Payloads> payloads) {
-  status_internal::StatusRep* rep = new status_internal::StatusRep;
-  rep->ref.store(1, std::memory_order_relaxed);
-  rep->code = code;
-  rep->message.assign(msg.data(), msg.size());
-  rep->payloads = std::move(payloads);
-  return PointerToRep(rep);
-}
-
 Status::Status(absl::StatusCode code, absl::string_view msg)
     : rep_(CodeToInlinedRep(code)) {
   if (code != absl::StatusCode::kOk && !msg.empty()) {
-    rep_ = NewRep(code, msg, nullptr);
+    rep_ = PointerToRep(new status_internal::StatusRep(code, msg, nullptr));
   }
 }
 
@@ -239,8 +241,9 @@
 void Status::PrepareToModify() {
   ABSL_RAW_CHECK(!ok(), "PrepareToModify shouldn't be called on OK status.");
   if (IsInlined(rep_)) {
-    rep_ = NewRep(static_cast<absl::StatusCode>(raw_code()),
-                  absl::string_view(), nullptr);
+    rep_ = PointerToRep(new status_internal::StatusRep(
+        static_cast<absl::StatusCode>(raw_code()), absl::string_view(),
+        nullptr));
     return;
   }
 
@@ -251,7 +254,9 @@
     if (rep->payloads) {
       payloads = absl::make_unique<status_internal::Payloads>(*rep->payloads);
     }
-    rep_ = NewRep(rep->code, message(), std::move(payloads));
+    status_internal::StatusRep* const new_rep = new status_internal::StatusRep(
+        rep->code, message(), std::move(payloads));
+    rep_ = PointerToRep(new_rep);
     UnrefNonInlined(rep_i);
   }
 }
@@ -290,26 +295,32 @@
   return true;
 }
 
-std::string Status::ToStringSlow() const {
+std::string Status::ToStringSlow(StatusToStringMode mode) const {
   std::string text;
   absl::StrAppend(&text, absl::StatusCodeToString(code()), ": ", message());
-  status_internal::StatusPayloadPrinter printer =
-      status_internal::GetStatusPayloadPrinter();
-  this->ForEachPayload([&](absl::string_view type_url,
-                           const absl::Cord& payload) {
-    absl::optional<std::string> result;
-    if (printer) result = printer(type_url, payload);
-    absl::StrAppend(
-        &text, " [", type_url, "='",
-        result.has_value() ? *result : absl::CHexEscape(std::string(payload)),
-        "']");
-  });
+
+  const bool with_payload = (mode & StatusToStringMode::kWithPayload) ==
+                            StatusToStringMode::kWithPayload;
+
+  if (with_payload) {
+    status_internal::StatusPayloadPrinter printer =
+        status_internal::GetStatusPayloadPrinter();
+    this->ForEachPayload([&](absl::string_view type_url,
+                             const absl::Cord& payload) {
+      absl::optional<std::string> result;
+      if (printer) result = printer(type_url, payload);
+      absl::StrAppend(
+          &text, " [", type_url, "='",
+          result.has_value() ? *result : absl::CHexEscape(std::string(payload)),
+          "']");
+    });
+  }
 
   return text;
 }
 
 std::ostream& operator<<(std::ostream& os, const Status& x) {
-  os << x.ToString();
+  os << x.ToString(StatusToStringMode::kWithEverything);
   return os;
 }
 
@@ -441,5 +452,176 @@
   return status.code() == absl::StatusCode::kUnknown;
 }
 
+StatusCode ErrnoToStatusCode(int error_number) {
+  switch (error_number) {
+    case 0:
+      return StatusCode::kOk;
+    case EINVAL:        // Invalid argument
+    case ENAMETOOLONG:  // Filename too long
+    case E2BIG:         // Argument list too long
+    case EDESTADDRREQ:  // Destination address required
+    case EDOM:          // Mathematics argument out of domain of function
+    case EFAULT:        // Bad address
+    case EILSEQ:        // Illegal byte sequence
+    case ENOPROTOOPT:   // Protocol not available
+    case ENOSTR:        // Not a STREAM
+    case ENOTSOCK:      // Not a socket
+    case ENOTTY:        // Inappropriate I/O control operation
+    case EPROTOTYPE:    // Protocol wrong type for socket
+    case ESPIPE:        // Invalid seek
+      return StatusCode::kInvalidArgument;
+    case ETIMEDOUT:  // Connection timed out
+    case ETIME:      // Timer expired
+      return StatusCode::kDeadlineExceeded;
+    case ENODEV:  // No such device
+    case ENOENT:  // No such file or directory
+#ifdef ENOMEDIUM
+    case ENOMEDIUM:  // No medium found
+#endif
+    case ENXIO:  // No such device or address
+    case ESRCH:  // No such process
+      return StatusCode::kNotFound;
+    case EEXIST:         // File exists
+    case EADDRNOTAVAIL:  // Address not available
+    case EALREADY:       // Connection already in progress
+#ifdef ENOTUNIQ
+    case ENOTUNIQ:  // Name not unique on network
+#endif
+      return StatusCode::kAlreadyExists;
+    case EPERM:   // Operation not permitted
+    case EACCES:  // Permission denied
+#ifdef ENOKEY
+    case ENOKEY:  // Required key not available
+#endif
+    case EROFS:  // Read only file system
+      return StatusCode::kPermissionDenied;
+    case ENOTEMPTY:   // Directory not empty
+    case EISDIR:      // Is a directory
+    case ENOTDIR:     // Not a directory
+    case EADDRINUSE:  // Address already in use
+    case EBADF:       // Invalid file descriptor
+#ifdef EBADFD
+    case EBADFD:  // File descriptor in bad state
+#endif
+    case EBUSY:    // Device or resource busy
+    case ECHILD:   // No child processes
+    case EISCONN:  // Socket is connected
+#ifdef EISNAM
+    case EISNAM:  // Is a named type file
+#endif
+#ifdef ENOTBLK
+    case ENOTBLK:  // Block device required
+#endif
+    case ENOTCONN:  // The socket is not connected
+    case EPIPE:     // Broken pipe
+#ifdef ESHUTDOWN
+    case ESHUTDOWN:  // Cannot send after transport endpoint shutdown
+#endif
+    case ETXTBSY:  // Text file busy
+#ifdef EUNATCH
+    case EUNATCH:  // Protocol driver not attached
+#endif
+      return StatusCode::kFailedPrecondition;
+    case ENOSPC:  // No space left on device
+#ifdef EDQUOT
+    case EDQUOT:  // Disk quota exceeded
+#endif
+    case EMFILE:   // Too many open files
+    case EMLINK:   // Too many links
+    case ENFILE:   // Too many open files in system
+    case ENOBUFS:  // No buffer space available
+    case ENODATA:  // No message is available on the STREAM read queue
+    case ENOMEM:   // Not enough space
+    case ENOSR:    // No STREAM resources
+#ifdef EUSERS
+    case EUSERS:  // Too many users
+#endif
+      return StatusCode::kResourceExhausted;
+#ifdef ECHRNG
+    case ECHRNG:  // Channel number out of range
+#endif
+    case EFBIG:      // File too large
+    case EOVERFLOW:  // Value too large to be stored in data type
+    case ERANGE:     // Result too large
+      return StatusCode::kOutOfRange;
+#ifdef ENOPKG
+    case ENOPKG:  // Package not installed
+#endif
+    case ENOSYS:        // Function not implemented
+    case ENOTSUP:       // Operation not supported
+    case EAFNOSUPPORT:  // Address family not supported
+#ifdef EPFNOSUPPORT
+    case EPFNOSUPPORT:  // Protocol family not supported
+#endif
+    case EPROTONOSUPPORT:  // Protocol not supported
+#ifdef ESOCKTNOSUPPORT
+    case ESOCKTNOSUPPORT:  // Socket type not supported
+#endif
+    case EXDEV:  // Improper link
+      return StatusCode::kUnimplemented;
+    case EAGAIN:  // Resource temporarily unavailable
+#ifdef ECOMM
+    case ECOMM:  // Communication error on send
+#endif
+    case ECONNREFUSED:  // Connection refused
+    case ECONNABORTED:  // Connection aborted
+    case ECONNRESET:    // Connection reset
+    case EINTR:         // Interrupted function call
+#ifdef EHOSTDOWN
+    case EHOSTDOWN:  // Host is down
+#endif
+    case EHOSTUNREACH:  // Host is unreachable
+    case ENETDOWN:      // Network is down
+    case ENETRESET:     // Connection aborted by network
+    case ENETUNREACH:   // Network unreachable
+    case ENOLCK:        // No locks available
+    case ENOLINK:       // Link has been severed
+#ifdef ENONET
+    case ENONET:  // Machine is not on the network
+#endif
+      return StatusCode::kUnavailable;
+    case EDEADLK:  // Resource deadlock avoided
+#ifdef ESTALE
+    case ESTALE:  // Stale file handle
+#endif
+      return StatusCode::kAborted;
+    case ECANCELED:  // Operation cancelled
+      return StatusCode::kCancelled;
+    default:
+      return StatusCode::kUnknown;
+  }
+}
+
+namespace {
+std::string MessageForErrnoToStatus(int error_number,
+                                    absl::string_view message) {
+  return absl::StrCat(message, ": ",
+                      absl::base_internal::StrError(error_number));
+}
+}  // namespace
+
+Status ErrnoToStatus(int error_number, absl::string_view message) {
+  return Status(ErrnoToStatusCode(error_number),
+                MessageForErrnoToStatus(error_number, message));
+}
+
+namespace status_internal {
+
+std::string* MakeCheckFailString(const absl::Status* status,
+                                 const char* prefix) {
+  return new std::string(
+      absl::StrCat(prefix, " (",
+                   status->ToString(StatusToStringMode::kWithEverything), ")"));
+}
+
+}  // namespace status_internal
+
+const char* StatusMessageAsCStr(const Status& status) {
+  // As an internal implementation detail, we guarantee that if status.message()
+  // is non-empty, then the resulting string_view is null terminated.
+  auto sv_message = status.message();
+  return sv_message.empty() ? "" : sv_message.data();
+}
+
 ABSL_NAMESPACE_END
 }  // namespace absl
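
A short usage sketch of the errno helpers added above, assuming the matching
absl::ErrnoToStatus and absl::ErrnoToStatusCode declarations that accompany this
change in status.h:

#include <cerrno>
#include <iostream>

#include "absl/status/status.h"

int main() {
  const absl::Status s = absl::ErrnoToStatus(ENOENT, "open failed");
  // The code mapping follows the switch above; the strerror() text is appended.
  std::cout << absl::StatusCodeToString(s.code()) << "\n";  // NOT_FOUND
  std::cout << s.ToString() << "\n";  // e.g. NOT_FOUND: open failed: No such file or directory
  return 0;
}
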
diff --git a/abseil-cpp/absl/status/status.h b/abseil-cpp/absl/status/status.h
index 42f634e..595064c 100644
--- a/abseil-cpp/absl/status/status.h
+++ b/abseil-cpp/absl/status/status.h
@@ -24,11 +24,11 @@
 //   * A set of helper functions for creating status codes and checking their
 //     values
 //
-// Within Google, `absl::Status` is the primary mechanism for gracefully
-// handling errors across API boundaries (and in particular across RPC
-// boundaries). Some of these errors may be recoverable, but others may not.
-// Most functions that can produce a recoverable error should be designed to
-// return an `absl::Status` (or `absl::StatusOr`).
+// Within Google, `absl::Status` is the primary mechanism for communicating
+// errors in C++, and is used to represent error state in both in-process
+// library calls as well as RPC calls. Some of these errors may be recoverable,
+// but others may not. Most functions that can produce a recoverable error
+// should be designed to return an `absl::Status` (or `absl::StatusOr`).
 //
 // Example:
 //
@@ -51,12 +51,14 @@
 #ifndef ABSL_STATUS_STATUS_H_
 #define ABSL_STATUS_STATUS_H_
 
-#include <iostream>
+#include <ostream>
 #include <string>
+#include <utility>
 
-#include "absl/container/inlined_vector.h"
+#include "absl/functional/function_ref.h"
 #include "absl/status/internal/status_internal.h"
 #include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 
 namespace absl {
@@ -79,7 +81,7 @@
 // `kFailedPrecondition` if both codes apply. Similarly prefer `kNotFound` or
 // `kAlreadyExists` over `kFailedPrecondition`.
 //
-// Because these errors may travel RPC boundaries, these codes are tied to the
+// Because these errors may cross RPC boundaries, these codes are tied to the
 // `google.rpc.Code` definitions within
 // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
 // The string value of these RPC codes is denoted within each enum below.
@@ -98,7 +100,7 @@
 
   // StatusCode::kCancelled
   //
-  // kCanelled (gRPC code "CANCELLED") indicates the operation was cancelled,
+  // kCancelled (gRPC code "CANCELLED") indicates the operation was cancelled,
   // typically by the caller.
   kCancelled = 1,
 
@@ -113,10 +115,10 @@
   // StatusCode::kInvalidArgument
   //
   // kInvalidArgument (gRPC code "INVALID_ARGUMENT") indicates the caller
-  // specified an invalid argument, such a malformed filename. Note that such
-  // errors should be narrowly limited to indicate to the invalid nature of the
-  // arguments themselves. Errors with validly formed arguments that may cause
-  // errors with the state of the receiving system should be denoted with
+  // specified an invalid argument, such as a malformed filename. Note that use
+  // of such errors should be narrowly limited to indicate the invalid nature of
+  // the arguments themselves. Errors with validly formed arguments that may
+  // cause errors with the state of the receiving system should be denoted with
   // `kFailedPrecondition` instead.
   kInvalidArgument = 3,
 
@@ -136,14 +138,15 @@
   //
   // `kNotFound` is useful if a request should be denied for an entire class of
   // users, such as during a gradual feature rollout or undocumented allow list.
-  // If, instead, a request should be denied for specific sets of users, such as
-  // through user-based access control, use `kPermissionDenied` instead.
+  // If a request should be denied for specific sets of users, such as through
+  // user-based access control, use `kPermissionDenied` instead.
   kNotFound = 5,
 
   // StatusCode::kAlreadyExists
   //
-  // kAlreadyExists (gRPC code "ALREADY_EXISTS") indicates the entity that a
-  // caller attempted to create (such as file or directory) is already present.
+  // kAlreadyExists (gRPC code "ALREADY_EXISTS") indicates that the entity a
+  // caller attempted to create (such as a file or directory) is already
+  // present.
   kAlreadyExists = 6,
 
   // StatusCode::kPermissionDenied
@@ -182,7 +185,7 @@
   //      level (such as when a client-specified test-and-set fails, indicating
   //      the client should restart a read-modify-write sequence).
   //  (c) Use `kFailedPrecondition` if the client should not retry until
-  //      the system state has been explicitly fixed. For example, if an "rmdir"
+  //      the system state has been explicitly fixed. For example, if a "rmdir"
   //      fails because the directory is non-empty, `kFailedPrecondition`
   //      should be returned since the client should not retry unless
   //      the files are deleted from the directory.
@@ -198,9 +201,9 @@
   // `kAborted`, and `kUnavailable`.
   kAborted = 10,
 
-  // StatusCode::kOutofRange
+  // StatusCode::kOutOfRange
   //
-  // kOutofRange (gRPC code "OUT_OF_RANGE") indicates the operation was
+  // kOutOfRange (gRPC code "OUT_OF_RANGE") indicates the operation was
   // attempted past the valid range, such as seeking or reading past an
   // end-of-file.
   //
@@ -279,6 +282,59 @@
 // Streams StatusCodeToString(code) to `os`.
 std::ostream& operator<<(std::ostream& os, StatusCode code);
 
+// absl::StatusToStringMode
+//
+// An `absl::StatusToStringMode` is an enumerated type indicating how
+// `absl::Status::ToString()` should construct the output string for a non-ok
+// status.
+enum class StatusToStringMode : int {
+  // ToString will not contain any extra data (such as payloads). It will only
+  // contain the error code and message, if any.
+  kWithNoExtraData = 0,
+  // ToString will contain the payloads.
+  kWithPayload = 1 << 0,
+  // ToString will include all the extra data this Status has.
+  kWithEverything = ~kWithNoExtraData,
+  // Default mode used by ToString. Its exact value might change in the future.
+  kDefault = kWithPayload,
+};
+
+// absl::StatusToStringMode is specified as a bitmask type, which means the
+// following operations must be provided:
+inline constexpr StatusToStringMode operator&(StatusToStringMode lhs,
+                                              StatusToStringMode rhs) {
+  return static_cast<StatusToStringMode>(static_cast<int>(lhs) &
+                                         static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator|(StatusToStringMode lhs,
+                                              StatusToStringMode rhs) {
+  return static_cast<StatusToStringMode>(static_cast<int>(lhs) |
+                                         static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator^(StatusToStringMode lhs,
+                                              StatusToStringMode rhs) {
+  return static_cast<StatusToStringMode>(static_cast<int>(lhs) ^
+                                         static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator~(StatusToStringMode arg) {
+  return static_cast<StatusToStringMode>(~static_cast<int>(arg));
+}
+inline StatusToStringMode& operator&=(StatusToStringMode& lhs,
+                                      StatusToStringMode rhs) {
+  lhs = lhs & rhs;
+  return lhs;
+}
+inline StatusToStringMode& operator|=(StatusToStringMode& lhs,
+                                      StatusToStringMode rhs) {
+  lhs = lhs | rhs;
+  return lhs;
+}
+inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
+                                      StatusToStringMode rhs) {
+  lhs = lhs ^ rhs;
+  return lhs;
+}
+
 // absl::Status
 //
 // The `absl::Status` class is generally used to gracefully handle errors
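To make the bitmask semantics above concrete, here is a minimal illustrative sketch (not part of the patch) that combines `StatusToStringMode` flags and passes the result to `Status::ToString()`, whose `mode` parameter is added further down in this file; the payload key and message are invented for the example.

#include <iostream>

#include "absl/status/status.h"
#include "absl/strings/cord.h"

int main() {
  absl::Status s(absl::StatusCode::kInternal, "disk full");
  s.SetPayload("example.com/retry-after", absl::Cord("30s"));

  // Default mode (kDefault == kWithPayload) includes payload data.
  std::cout << s.ToString() << "\n";

  // Suppress all extra data: only the code name and message are printed.
  std::cout << s.ToString(absl::StatusToStringMode::kWithNoExtraData) << "\n";

  // Bitmask composition: everything except the payloads.
  const absl::StatusToStringMode mode =
      absl::StatusToStringMode::kWithEverything &
      ~absl::StatusToStringMode::kWithPayload;
  std::cout << s.ToString(mode) << "\n";
}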
@@ -291,7 +347,7 @@
 // API developers should construct their functions to return `absl::OkStatus()`
 // upon success, or an `absl::StatusCode` upon another type of error (e.g
 // an `absl::StatusCode::kInvalidArgument` error). The API provides convenience
-// functions to constuct each status code.
+// functions to construct each status code.
 //
 // Example:
 //
@@ -342,7 +398,7 @@
 //
 //   * It may provide more fine-grained semantic information about the error to
 //     facilitate actionable remedies.
-//   * It may provide human-readable contexual information that is more
+//   * It may provide human-readable contextual information that is more
 //     appropriate to display to an end user.
 //
 // Example:
@@ -360,7 +416,12 @@
 //     return result;
 //   }
 //
-class ABSL_MUST_USE_RESULT Status final {
+// For documentation see https://abseil.io/docs/cpp/guides/status.
+//
+// Returned Status objects may not be ignored. status_internal.h has a forward
+// declaration of the form
+// class ABSL_MUST_USE_RESULT Status;
+class Status final {
  public:
   // Constructors
 
@@ -370,10 +431,10 @@
   Status();
 
   // Creates a status in the canonical error space with the specified
-  // `absl::StatusCode` and error message.  If `code == absl::StatusCode::kOk`,
+  // `absl::StatusCode` and error message.  If `code == absl::StatusCode::kOk`,  // NOLINT
   // `msg` is ignored and an object identical to an OK status is constructed.
   //
-  // The `msg` string must be in UTF-8. The implementation may complain (e.g.,
+  // The `msg` string must be in UTF-8. The implementation may complain (e.g.,  // NOLINT
   // by printing a warning) if it is not.
   Status(absl::StatusCode code, absl::string_view msg);
 
@@ -408,8 +469,9 @@
 
   // Status::ok()
   //
-  // Returns `true` if `this->ok()`. Prefer checking for an OK status using this
-  // member function.
+  // Returns `true` if `this->code()` == `absl::StatusCode::kOk`,
+  // indicating the absence of an error.
+  // Prefer checking for an OK status using this member function.
   ABSL_MUST_USE_RESULT bool ok() const;
 
   // Status::code()
@@ -434,7 +496,7 @@
   // Returns the error message associated with this error code, if available.
   // Note that this message rarely describes the error code.  It is not unusual
   // for the error message to be the empty string. As a result, prefer
-  // `Status::ToString()` for debug logging.
+  // `operator<<` or `Status::ToString()` for debug logging.
   absl::string_view message() const;
 
   friend bool operator==(const Status&, const Status&);
@@ -442,15 +504,17 @@
 
   // Status::ToString()
   //
-  // Returns a combination of the error code name, the message and any
-  // associated payload messages. This string is designed simply to be human
-  // readable and its exact format should not be load bearing. Do not depend on
-  // the exact format of the result of `ToString()` which is subject to change.
+  // Returns a string based on the `mode`. By default, it returns a combination
+  // of the error code name, the message, and any associated payload messages.
+  // This string is designed simply to be human readable; its exact format
+  // should not be load bearing. Do not depend on the exact format of the
+  // result of `ToString()`, which is subject to change.
   //
   // The printed code name and the message are generally substrings of the
   // result, and the payloads to be printed use the status payload printer
   // mechanism (which is internal).
-  std::string ToString() const;
+  std::string ToString(
+      StatusToStringMode mode = StatusToStringMode::kDefault) const;
 
   // Status::IgnoreError()
   //
@@ -469,12 +533,12 @@
   //----------------------------------------------------------------------------
 
   // A payload may be attached to a status to provide additional context to an
-  // error that may not be satisifed by an existing `absl::StatusCode`.
+  // error that may not be satisfied by an existing `absl::StatusCode`.
   // Typically, this payload serves one of several purposes:
   //
   //   * It may provide more fine-grained semantic information about the error
   //     to facilitate actionable remedies.
-  //   * It may provide human-readable contexual information that is more
+  //   * It may provide human-readable contextual information that is more
   //     appropriate to display to an end user.
   //
   // A payload consists of a [key,value] pair, where the key is a string
@@ -528,7 +592,7 @@
   // NOTE: Any mutation on the same 'absl::Status' object during visitation is
   // forbidden and could result in undefined behavior.
   void ForEachPayload(
-      const std::function<void(absl::string_view, const absl::Cord&)>& visitor)
+      absl::FunctionRef<void(absl::string_view, const absl::Cord&)> visitor)
       const;
 
  private:
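Because the visitor parameter above is now an `absl::FunctionRef`, any callable, typically a lambda, binds directly without constructing a `std::function`. A brief sketch, with an invented payload key:

#include <iostream>
#include <string>

#include "absl/status/status.h"
#include "absl/strings/cord.h"

// Prints every payload attached to `s`; the lambda binds to
// absl::FunctionRef directly, so no std::function is created.
void DumpPayloads(const absl::Status& s) {
  s.ForEachPayload([](absl::string_view type_url, const absl::Cord& payload) {
    std::cout << type_url << " -> " << std::string(payload) << "\n";
  });
}

int main() {
  absl::Status s(absl::StatusCode::kNotFound, "missing row");
  s.SetPayload("example.com/debug-id", absl::Cord("42"));
  DumpPayloads(s);
}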
@@ -549,9 +613,6 @@
   const status_internal::Payloads* GetPayloads() const;
   status_internal::Payloads* GetPayloads();
 
-  // Takes ownership of payload.
-  static uintptr_t NewRep(absl::StatusCode code, absl::string_view msg,
-                          std::unique_ptr<status_internal::Payloads> payload);
   static bool EqualsSlow(const absl::Status& a, const absl::Status& b);
 
   // MSVC 14.0 limitation requires the const.
@@ -580,8 +641,7 @@
   static uintptr_t PointerToRep(status_internal::StatusRep* r);
   static status_internal::StatusRep* RepToPointer(uintptr_t r);
 
-  // Returns string for non-ok Status.
-  std::string ToStringSlow() const;
+  std::string ToStringSlow(StatusToStringMode mode) const;
 
   // Status supports two different representations.
   //  - When the low bit is off it is an inlined representation.
@@ -678,6 +738,19 @@
 Status UnimplementedError(absl::string_view message);
 Status UnknownError(absl::string_view message);
 
+// ErrnoToStatusCode()
+//
+// Returns the StatusCode for `error_number`, which should be an `errno` value.
+// See https://en.cppreference.com/w/cpp/error/errno_macros and similar
+// references.
+absl::StatusCode ErrnoToStatusCode(int error_number);
+
+// ErrnoToStatus()
+//
+// Convenience function that creates an `absl::Status` using an `error_number`,
+// which should be an `errno` value.
+Status ErrnoToStatus(int error_number, absl::string_view message);
+
 //------------------------------------------------------------------------------
 // Implementation details follow
 //------------------------------------------------------------------------------
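A hedged sketch of the intended use of the errno helpers declared above, wrapping a failing libc call; the function name and path are hypothetical. As the test added below shows, `ErrnoToStatus()` appends the `strerror` text to the supplied message.

#include <cerrno>
#include <cstdio>

#include "absl/status/status.h"

// Hypothetical helper: returns kNotFound with a message such as
// "cannot open config: No such file or directory" when the file is missing.
absl::Status OpenConfig(const char* path) {
  std::FILE* f = std::fopen(path, "r");
  if (f == nullptr) {
    return absl::ErrnoToStatus(errno, "cannot open config");
  }
  std::fclose(f);
  return absl::OkStatus();
}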
@@ -704,9 +777,11 @@
 
 inline Status& Status::operator=(Status&& x) {
   uintptr_t old_rep = rep_;
-  rep_ = x.rep_;
-  x.rep_ = MovedFromRep();
-  Unref(old_rep);
+  if (x.rep_ != old_rep) {
+    rep_ = x.rep_;
+    x.rep_ = MovedFromRep();
+    Unref(old_rep);
+  }
   return *this;
 }
 
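The guard added above makes self-move-assignment a harmless no-op instead of leaving the object in a moved-from state; a small sketch mirroring the self-assignment test added to status_test.cc below:

#include <cassert>

#include "absl/status/status.h"

void SelfMoveAssignIsANoOp() {
  absl::Status s(absl::StatusCode::kInvalidArgument, "bad input");
  // Equivalent to `s = std::move(s)`, spelled out to avoid -Wself-move.
  s = static_cast<absl::Status&&>(s);
  assert(s.code() == absl::StatusCode::kInvalidArgument);  // Still intact.
}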
@@ -743,8 +818,8 @@
   return !(lhs == rhs);
 }
 
-inline std::string Status::ToString() const {
-  return ok() ? "OK" : ToStringSlow();
+inline std::string Status::ToString(StatusToStringMode mode) const {
+  return ok() ? "OK" : ToStringSlow(mode);
 }
 
 inline void Status::IgnoreError() const {
@@ -811,6 +886,15 @@
 // message-less kCancelled errors are common in the infrastructure.
 inline Status CancelledError() { return Status(absl::StatusCode::kCancelled); }
 
+// Retrieves a status's message as a null-terminated C string. The lifetime of
+// this string is tied to the lifetime of the status object itself.
+//
+// If the status's message is empty, the empty string is returned.
+//
+// StatusMessageAsCStr exists for C support. Use `status.message()` in C++.
+const char* StatusMessageAsCStr(
+    const Status& status ABSL_ATTRIBUTE_LIFETIME_BOUND);
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
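An illustrative sketch of the C-interop case that `StatusMessageAsCStr()` targets; `LegacyLog` is a stand-in invented for the example.

#include <cstdio>

#include "absl/status/status.h"

// Stand-in for a C API that only accepts null-terminated strings.
void LegacyLog(const char* msg) { std::printf("%s\n", msg); }

int main() {
  absl::Status status(absl::StatusCode::kPermissionDenied, "token expired");
  // The returned pointer stays valid for the lifetime of `status`; an empty
  // message yields "" rather than nullptr.
  LegacyLog(absl::StatusMessageAsCStr(status));
}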
diff --git a/abseil-cpp/absl/status/status_test.cc b/abseil-cpp/absl/status/status_test.cc
index ca9488a..898a9cb 100644
--- a/abseil-cpp/absl/status/status_test.cc
+++ b/abseil-cpp/absl/status/status_test.cc
@@ -14,6 +14,8 @@
 
 #include "absl/status/status.h"
 
+#include <errno.h>
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/strings/str_cat.h"
@@ -36,7 +38,9 @@
 // its creator, and its classifier.
 struct ErrorTest {
   absl::StatusCode code;
-  using Creator = absl::Status (*)(absl::string_view);
+  using Creator = absl::Status (*)(
+      absl::string_view
+  );
   using Classifier = bool (*)(const absl::Status&);
   Creator creator;
   Classifier classifier;
@@ -78,7 +82,9 @@
     // expected error code and message.
     std::string message =
         absl::StrCat("error code ", test.code, " test message");
-    absl::Status status = test.creator(message);
+    absl::Status status = test.creator(
+        message
+    );
     EXPECT_EQ(test.code, status.code());
     EXPECT_EQ(message, status.message());
 
@@ -126,6 +132,29 @@
   }
 }
 
+TEST(Status, StatusMessageCStringTest) {
+  {
+    absl::Status status = absl::OkStatus();
+    EXPECT_EQ(status.message(), "");
+    EXPECT_STREQ(absl::StatusMessageAsCStr(status), "");
+    EXPECT_EQ(status.message(), absl::StatusMessageAsCStr(status));
+    EXPECT_NE(absl::StatusMessageAsCStr(status), nullptr);
+  }
+  {
+    absl::Status status;
+    EXPECT_EQ(status.message(), "");
+    EXPECT_NE(absl::StatusMessageAsCStr(status), nullptr);
+    EXPECT_STREQ(absl::StatusMessageAsCStr(status), "");
+  }
+  {
+    absl::Status status(absl::StatusCode::kInternal, "message");
+    EXPECT_FALSE(status.ok());
+    EXPECT_EQ(absl::StatusCode::kInternal, status.code());
+    EXPECT_EQ("message", status.message());
+    EXPECT_STREQ("message", absl::StatusMessageAsCStr(status));
+  }
+}
+
 TEST(Status, ConstructOutOfRangeCode) {
   const int kRawCode = 9999;
   absl::Status status(static_cast<absl::StatusCode>(kRawCode), "");
@@ -280,6 +309,27 @@
                     HasSubstr("[bar='\\xff']")));
 }
 
+TEST(Status, ToStringMode) {
+  absl::Status s(absl::StatusCode::kInternal, "fail");
+  s.SetPayload("foo", absl::Cord("bar"));
+  s.SetPayload("bar", absl::Cord("\377"));
+
+  EXPECT_EQ("INTERNAL: fail",
+            s.ToString(absl::StatusToStringMode::kWithNoExtraData));
+
+  EXPECT_THAT(s.ToString(absl::StatusToStringMode::kWithPayload),
+              AllOf(HasSubstr("INTERNAL: fail"), HasSubstr("[foo='bar']"),
+                    HasSubstr("[bar='\\xff']")));
+
+  EXPECT_THAT(s.ToString(absl::StatusToStringMode::kWithEverything),
+              AllOf(HasSubstr("INTERNAL: fail"), HasSubstr("[foo='bar']"),
+                    HasSubstr("[bar='\\xff']")));
+
+  EXPECT_THAT(s.ToString(~absl::StatusToStringMode::kWithPayload),
+              AllOf(HasSubstr("INTERNAL: fail"), Not(HasSubstr("[foo='bar']")),
+                    Not(HasSubstr("[bar='\\xff']"))));
+}
+
 absl::Status EraseAndReturn(const absl::Status& base) {
   absl::Status copy = base;
   EXPECT_TRUE(copy.ErasePayload(kUrl1));
@@ -397,6 +447,12 @@
     assignee = std::move(status);
     EXPECT_EQ(assignee, copy);
   }
+  {
+    absl::Status status(absl::StatusCode::kInvalidArgument, "message");
+    absl::Status copy(status);
+    status = static_cast<absl::Status&&>(status);
+    EXPECT_EQ(status, copy);
+  }
 }
 
 TEST(Status, Update) {
@@ -455,4 +511,22 @@
   test_swap(with_payload, no_payload);
 }
 
+TEST(StatusErrno, ErrnoToStatusCode) {
+  EXPECT_EQ(absl::ErrnoToStatusCode(0), absl::StatusCode::kOk);
+
+  // Spot-check a few errno values.
+  EXPECT_EQ(absl::ErrnoToStatusCode(EINVAL),
+            absl::StatusCode::kInvalidArgument);
+  EXPECT_EQ(absl::ErrnoToStatusCode(ENOENT), absl::StatusCode::kNotFound);
+
+  // We'll pick a very large number that shouldn't collide with any errno value.
+  EXPECT_EQ(absl::ErrnoToStatusCode(19980927), absl::StatusCode::kUnknown);
+}
+
+TEST(StatusErrno, ErrnoToStatus) {
+  absl::Status status = absl::ErrnoToStatus(ENOENT, "Cannot open 'path'");
+  EXPECT_EQ(status.code(), absl::StatusCode::kNotFound);
+  EXPECT_EQ(status.message(), "Cannot open 'path': No such file or directory");
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/status/statusor.cc b/abseil-cpp/absl/status/statusor.cc
index b954b45..96642b3 100644
--- a/abseil-cpp/absl/status/statusor.cc
+++ b/abseil-cpp/absl/status/statusor.cc
@@ -16,6 +16,7 @@
 #include <cstdlib>
 #include <utility>
 
+#include "absl/base/call_once.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/status/status.h"
 #include "absl/strings/str_cat.h"
@@ -26,13 +27,44 @@
 BadStatusOrAccess::BadStatusOrAccess(absl::Status status)
     : status_(std::move(status)) {}
 
-BadStatusOrAccess::~BadStatusOrAccess() = default;
+BadStatusOrAccess::BadStatusOrAccess(const BadStatusOrAccess& other)
+    : status_(other.status_) {}
+
+BadStatusOrAccess& BadStatusOrAccess::operator=(
+    const BadStatusOrAccess& other) {
+  // Ensure assignment is correct regardless of whether this->InitWhat() has
+  // already been called.
+  other.InitWhat();
+  status_ = other.status_;
+  what_ = other.what_;
+  return *this;
+}
+
+BadStatusOrAccess& BadStatusOrAccess::operator=(BadStatusOrAccess&& other) {
+  // Ensure assignment is correct regardless of whether this->InitWhat() has
+  // already been called.
+  other.InitWhat();
+  status_ = std::move(other.status_);
+  what_ = std::move(other.what_);
+  return *this;
+}
+
+BadStatusOrAccess::BadStatusOrAccess(BadStatusOrAccess&& other)
+    : status_(std::move(other.status_)) {}
+
 const char* BadStatusOrAccess::what() const noexcept {
-  return "Bad StatusOr access";
+  InitWhat();
+  return what_.c_str();
 }
 
 const absl::Status& BadStatusOrAccess::status() const { return status_; }
 
+void BadStatusOrAccess::InitWhat() const {
+  absl::call_once(init_what_, [this] {
+    what_ = absl::StrCat("Bad StatusOr access: ", status_.ToString());
+  });
+}
+
 namespace internal_statusor {
 
 void Helper::HandleInvalidStatusCtorArg(absl::Status* status) {
diff --git a/abseil-cpp/absl/status/statusor.h b/abseil-cpp/absl/status/statusor.h
index bdf6039..54c7ce0 100644
--- a/abseil-cpp/absl/status/statusor.h
+++ b/abseil-cpp/absl/status/statusor.h
@@ -44,6 +44,7 @@
 #include <utility>
 
 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/meta/type_traits.h"
 #include "absl/status/internal/statusor_internal.h"
 #include "absl/status/status.h"
@@ -72,13 +73,18 @@
 class BadStatusOrAccess : public std::exception {
  public:
   explicit BadStatusOrAccess(absl::Status status);
-  ~BadStatusOrAccess() override;
+  ~BadStatusOrAccess() override = default;
+
+  BadStatusOrAccess(const BadStatusOrAccess& other);
+  BadStatusOrAccess& operator=(const BadStatusOrAccess& other);
+  BadStatusOrAccess(BadStatusOrAccess&& other);
+  BadStatusOrAccess& operator=(BadStatusOrAccess&& other);
 
   // BadStatusOrAccess::what()
   //
   // Returns the associated explanatory string of the `absl::StatusOr<T>`
-  // object's error code. This function only returns the string literal "Bad
-  // StatusOr Access" for cases when evaluating general exceptions.
+  // object's error code. This function contains information about the failing
+  // status, but its exact formatting may change and should not be depended on.
   //
   // The pointer of this string is guaranteed to be valid until any non-const
   // function is invoked on the exception object.
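A sketch of what the richer `what()` provides: the exception text now embeds the failing status, formatted lazily on first access, and copies retain it. The exact text is unspecified; this only illustrates the declarations above.

#include <iostream>

#include "absl/base/config.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"

int main() {
#ifdef ABSL_HAVE_EXCEPTIONS
  absl::StatusOr<int> parsed = absl::InvalidArgumentError("not a number");
  try {
    (void)parsed.value();  // Throws: `parsed` holds an error, not an int.
  } catch (const absl::BadStatusOrAccess& e) {
    // e.what() contains e.status().ToString(); a copy of `e` keeps it too.
    std::cout << e.what() << "\n";
  }
#endif
}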
@@ -91,12 +97,22 @@
   const absl::Status& status() const;
 
  private:
+  void InitWhat() const;
+
   absl::Status status_;
+  mutable absl::once_flag init_what_;
+  mutable std::string what_;
 };
 
 // Returned StatusOr objects may not be ignored.
 template <typename T>
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+class [[nodiscard]] StatusOr;
+#else
 class ABSL_MUST_USE_RESULT StatusOr;
+#endif  // ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
 
 // absl::StatusOr<T>
 //
@@ -129,13 +145,13 @@
 // Example:
 //
 //   absl::StatusOr<int> i = GetCount();
-//   if (foo.ok()) {
-//     updated_total += *i
+//   if (i.ok()) {
+//     updated_total += *i;
 //   }
 //
 // NOTE: using `absl::StatusOr<T>::value()` when no valid value is present will
 // throw an exception if exceptions are enabled or terminate the process when
-// execeptions are not enabled.
+// exceptions are not enabled.
 //
 // Example:
 //
@@ -146,8 +162,8 @@
 // A `absl::StatusOr<T*>` can be constructed from a null pointer like any other
 // pointer value, and the result will be that `ok()` returns `true` and
 // `value()` returns `nullptr`. Checking the value of pointer in an
-// `absl::StatusOr<T>` generally requires a bit more care, to ensure both that a
-// value is present and that value is not null:
+// `absl::StatusOr<T*>` generally requires a bit more care, to ensure both that
+// a value is present and that value is not null:
 //
 //  StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
 //  if (!result.ok()) {
@@ -395,7 +411,7 @@
       typename = typename std::enable_if<absl::conjunction<
           std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
           absl::disjunction<
-              std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>, T>,
+              std::is_same<absl::remove_cvref_t<U>, T>,
               absl::conjunction<
                   absl::negation<std::is_convertible<U&&, absl::Status>>,
                   absl::negation<internal_statusor::
@@ -419,8 +435,8 @@
   // if `T` can be constructed from a `U`. Can accept move or copy constructors.
   //
   // This constructor is explicit if `U` is not convertible to `T`. To avoid
-  // ambiguity, this constuctor is disabled if `U` is a `StatusOr<J>`, where `J`
-  // is convertible to `T`.
+  // ambiguity, this constructor is disabled if `U` is a `StatusOr<J>`, where
+  // `J` is convertible to `T`.
   template <
       typename U = T,
       absl::enable_if_t<
@@ -428,8 +444,7 @@
               internal_statusor::IsDirectInitializationValid<T, U&&>,
               std::is_constructible<T, U&&>, std::is_convertible<U&&, T>,
               absl::disjunction<
-                  std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
-                               T>,
+                  std::is_same<absl::remove_cvref_t<U>, T>,
                   absl::conjunction<
                       absl::negation<std::is_convertible<U&&, absl::Status>>,
                       absl::negation<
@@ -437,8 +452,7 @@
                               T, U&&>>>>>::value,
           int> = 0>
   StatusOr(U&& u)  // NOLINT
-      : StatusOr(absl::in_place, std::forward<U>(u)) {
-  }
+      : StatusOr(absl::in_place, std::forward<U>(u)) {}
 
   template <
       typename U = T,
@@ -446,8 +460,7 @@
           absl::conjunction<
               internal_statusor::IsDirectInitializationValid<T, U&&>,
               absl::disjunction<
-                  std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
-                               T>,
+                  std::is_same<absl::remove_cvref_t<U>, T>,
                   absl::conjunction<
                       absl::negation<std::is_constructible<absl::Status, U&&>>,
                       absl::negation<
@@ -457,13 +470,12 @@
               absl::negation<std::is_convertible<U&&, T>>>::value,
           int> = 0>
   explicit StatusOr(U&& u)  // NOLINT
-      : StatusOr(absl::in_place, std::forward<U>(u)) {
-  }
+      : StatusOr(absl::in_place, std::forward<U>(u)) {}
 
   // StatusOr<T>::ok()
   //
   // Returns whether or not this `absl::StatusOr<T>` holds a `T` value. This
-  // member function is analagous to `absl::Status::ok()` and should be used
+  // member function is analogous to `absl::Status::ok()` and should be used
   // similarly to check the status of return values.
   //
   // Example:
@@ -481,7 +493,7 @@
   // Returns a reference to the current `absl::Status` contained within the
   // `absl::StatusOr<T>`. If `absl::StatusOr<T>` contains a `T`, then this
   // function returns `absl::OkStatus()`.
-  const Status& status() const &;
+  const Status& status() const&;
   Status status() &&;
 
   // StatusOr<T>::value()
@@ -510,10 +522,10 @@
   //
   // The `std::move` on statusor instead of on the whole expression enables
   // warnings about possible uses of the statusor object after the move.
-  const T& value() const&;
-  T& value() &;
-  const T&& value() const&&;
-  T&& value() &&;
+  const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  const T&& value() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND;
 
   // StatusOr<T>:: operator*()
   //
@@ -525,10 +537,10 @@
   // `absl::StatusOr<T>`. Alternatively, see the `value()` member function for a
   // similar API that guarantees crashing or throwing an exception if there is
   // no current value.
-  const T& operator*() const&;
-  T& operator*() &;
-  const T&& operator*() const&&;
-  T&& operator*() &&;
+  const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND;
 
   // StatusOr<T>::operator->()
   //
@@ -537,12 +549,12 @@
   // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined.
   //
   // Use `this->ok()` to verify that there is a current value.
-  const T* operator->() const;
-  T* operator->();
+  const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
+  T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND;
 
   // StatusOr<T>::value_or()
   //
-  // Returns the current value of `this->ok() == true`. Otherwise constructs a
+  // Returns the current value if `this->ok() == true`. Otherwise constructs a
   // value using the provided `default_value`.
   //
   // Unlike `value`, this function returns by value, copying the current value
@@ -570,7 +582,7 @@
   // Reconstructs the inner value T in-place using the provided args, using the
   // T(args...) constructor. Returns reference to the reconstructed `T`.
   template <typename... Args>
-  T& emplace(Args&&... args) {
+  T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ok()) {
       this->Clear();
       this->MakeValue(std::forward<Args>(args)...);
@@ -586,7 +598,8 @@
       absl::enable_if_t<
           std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
           int> = 0>
-  T& emplace(std::initializer_list<U> ilist, Args&&... args) {
+  T& emplace(std::initializer_list<U> ilist,
+             Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ok()) {
       this->Clear();
       this->MakeValue(ilist, std::forward<Args>(args)...);
@@ -597,6 +610,21 @@
     return this->data_;
   }
 
+  // StatusOr<T>::AssignStatus()
+  //
+  // Sets the status of `absl::StatusOr<T>` to the given non-ok status value.
+  //
+  // NOTE: We recommend using the constructor and `operator=` where possible.
+  // This method is intended for use in generic programming, to enable setting
+  // the status of a `StatusOr<T>` when `T` may be `Status`. In that case, the
+  // constructor and `operator=` would assign into the inner value of type
+  // `Status`, rather than the status of the `StatusOr` (b/280392796).
+  //
+  // REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
+  // In optimized builds, passing absl::OkStatus() here will have the effect
+  // of passing absl::StatusCode::kInternal as a fallback.
+  using internal_statusor::StatusOrData<T>::AssignStatus;
+
  private:
   using internal_statusor::StatusOrData<T>::Assign;
   template <typename U>
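A sketch of the generic-programming scenario described in the note above; `FailWith` is an invented helper. When `T` is itself `absl::Status`, plain assignment would populate the value slot, whereas `AssignStatus()` always sets the error.

#include <utility>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

// Builds an errored StatusOr<T> even when T == absl::Status. `error` must be
// non-ok, per the REQUIRES clause above.
template <typename T>
absl::StatusOr<T> FailWith(absl::Status error) {
  absl::StatusOr<T> result;                // Default-constructed: kUnknown.
  result.AssignStatus(std::move(error));   // Sets the error slot, not a value.
  return result;
}

// Both of these yield a non-ok StatusOr carrying kCancelled:
//   auto a = FailWith<int>(absl::CancelledError());
//   auto b = FailWith<absl::Status>(absl::CancelledError());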
@@ -661,7 +689,9 @@
     : Base(absl::in_place, ilist, std::forward<Args>(args)...) {}
 
 template <typename T>
-const Status& StatusOr<T>::status() const & { return this->status_; }
+const Status& StatusOr<T>::status() const& {
+  return this->status_;
+}
 template <typename T>
 Status StatusOr<T>::status() && {
   return ok() ? OkStatus() : std::move(this->status_);
diff --git a/abseil-cpp/absl/status/statusor_test.cc b/abseil-cpp/absl/status/statusor_test.cc
index 5e4b268..e65f5d2 100644
--- a/abseil-cpp/absl/status/statusor_test.cc
+++ b/abseil-cpp/absl/status/statusor_test.cc
@@ -17,6 +17,7 @@
 #include <array>
 #include <initializer_list>
 #include <memory>
+#include <string>
 #include <type_traits>
 #include <utility>
 
@@ -25,6 +26,7 @@
 #include "absl/base/casts.h"
 #include "absl/memory/memory.h"
 #include "absl/status/status.h"
+#include "absl/strings/string_view.h"
 #include "absl/types/any.h"
 #include "absl/utility/utility.h"
 
@@ -34,6 +36,7 @@
 using ::testing::AnyWith;
 using ::testing::ElementsAre;
 using ::testing::Field;
+using ::testing::HasSubstr;
 using ::testing::Ne;
 using ::testing::Not;
 using ::testing::Pointee;
@@ -257,9 +260,9 @@
 
 TEST(StatusOr, TestValueOrDieOverloadForConstTemporary) {
   static_assert(
-      std::is_same<const int&&,
-                   decltype(
-                       std::declval<const absl::StatusOr<int>&&>().value())>(),
+      std::is_same<
+          const int&&,
+          decltype(std::declval<const absl::StatusOr<int>&&>().value())>(),
       "value() for const temporaries should return const T&&");
 }
 
@@ -292,20 +295,68 @@
   EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
 }
 
+TEST(StatusOr, StatusCtorForwards) {
+  absl::Status status(absl::StatusCode::kInternal, "Some error");
+
+  EXPECT_EQ(absl::StatusOr<int>(status).status().message(), "Some error");
+  EXPECT_EQ(status.message(), "Some error");
+
+  EXPECT_EQ(absl::StatusOr<int>(std::move(status)).status().message(),
+            "Some error");
+  EXPECT_NE(status.message(), "Some error");
+}
+
+TEST(BadStatusOrAccessTest, CopyConstructionWhatOk) {
+  absl::Status error =
+      absl::InternalError("some arbitrary message too big for the sso buffer");
+  absl::BadStatusOrAccess e1{error};
+  absl::BadStatusOrAccess e2{e1};
+  EXPECT_THAT(e1.what(), HasSubstr(error.ToString()));
+  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
+}
+
+TEST(BadStatusOrAccessTest, CopyAssignmentWhatOk) {
+  absl::Status error =
+      absl::InternalError("some arbitrary message too big for the sso buffer");
+  absl::BadStatusOrAccess e1{error};
+  absl::BadStatusOrAccess e2{absl::InternalError("other")};
+  e2 = e1;
+  EXPECT_THAT(e1.what(), HasSubstr(error.ToString()));
+  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
+}
+
+TEST(BadStatusOrAccessTest, MoveConstructionWhatOk) {
+  absl::Status error =
+      absl::InternalError("some arbitrary message too big for the sso buffer");
+  absl::BadStatusOrAccess e1{error};
+  absl::BadStatusOrAccess e2{std::move(e1)};
+  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
+}
+
+TEST(BadStatusOrAccessTest, MoveAssignmentWhatOk) {
+  absl::Status error =
+      absl::InternalError("some arbitrary message too big for the sso buffer");
+  absl::BadStatusOrAccess e1{error};
+  absl::BadStatusOrAccess e2{absl::InternalError("other")};
+  e2 = std::move(e1);
+  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
+}
+
 // Define `EXPECT_DEATH_OR_THROW` to test the behavior of `StatusOr::value`,
 // which either throws `BadStatusOrAccess` or `LOG(FATAL)` based on whether
 // exceptions are enabled.
 #ifdef ABSL_HAVE_EXCEPTIONS
-#define EXPECT_DEATH_OR_THROW(statement, status_)    \
-  EXPECT_THROW(                                      \
-      {                                              \
-        try {                                        \
-          statement;                                 \
-        } catch (const absl::BadStatusOrAccess& e) { \
-          EXPECT_EQ(e.status(), status_);            \
-          throw;                                     \
-        }                                            \
-      },                                             \
+#define EXPECT_DEATH_OR_THROW(statement, status_)                  \
+  EXPECT_THROW(                                                    \
+      {                                                            \
+        try {                                                      \
+          statement;                                               \
+        } catch (const absl::BadStatusOrAccess& e) {               \
+          EXPECT_EQ(e.status(), status_);                          \
+          EXPECT_THAT(e.what(), HasSubstr(e.status().ToString())); \
+          throw;                                                   \
+        }                                                          \
+      },                                                           \
       absl::BadStatusOrAccess);
 #else  // ABSL_HAVE_EXCEPTIONS
 #define EXPECT_DEATH_OR_THROW(statement, status) \
@@ -401,8 +452,6 @@
   EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);
 }
 
-
-
 TEST(StatusOr, TestValueCtor) {
   const int kI = 4;
   const absl::StatusOr<int> thing(kI);
@@ -1289,8 +1338,6 @@
   EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
 }
 
-
-
 TEST(StatusOr, TestPointerStatusCtor) {
   absl::StatusOr<int*> thing(absl::CancelledError());
   EXPECT_FALSE(thing.ok());
@@ -1474,7 +1521,7 @@
 TEST(StatusOr, TestIgnoreError) { MakeStatus().IgnoreError(); }
 
 TEST(StatusOr, EqualityOperator) {
-  constexpr int kNumCases = 4;
+  constexpr size_t kNumCases = 4;
   std::array<absl::StatusOr<int>, kNumCases> group1 = {
       absl::StatusOr<int>(1), absl::StatusOr<int>(2),
       absl::StatusOr<int>(absl::InvalidArgumentError("msg")),
@@ -1483,8 +1530,8 @@
       absl::StatusOr<int>(1), absl::StatusOr<int>(2),
       absl::StatusOr<int>(absl::InvalidArgumentError("msg")),
       absl::StatusOr<int>(absl::InternalError("msg"))};
-  for (int i = 0; i < kNumCases; ++i) {
-    for (int j = 0; j < kNumCases; ++j) {
+  for (size_t i = 0; i < kNumCases; ++i) {
+    for (size_t j = 0; j < kNumCases; ++j) {
       if (i == j) {
         EXPECT_TRUE(group1[i] == group2[j]);
         EXPECT_FALSE(group1[i] != group2[j]);
@@ -1797,4 +1844,37 @@
   }
 }
 
+TEST(StatusOr, StatusAssignmentFromStatusError) {
+  absl::StatusOr<absl::Status> statusor;
+  statusor.AssignStatus(absl::CancelledError());
+
+  EXPECT_FALSE(statusor.ok());
+  EXPECT_EQ(statusor.status(), absl::CancelledError());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(StatusOr, StatusAssignmentFromStatusOk) {
+  EXPECT_DEBUG_DEATH(
+      {
+        absl::StatusOr<absl::Status> statusor;
+        // This will DCHECK.
+        statusor.AssignStatus(absl::OkStatus());
+        // In optimized mode, we are actually going to get error::INTERNAL for
+        // status here, rather than crashing, so check that.
+        EXPECT_FALSE(statusor.ok());
+        EXPECT_EQ(statusor.status().code(), absl::StatusCode::kInternal);
+      },
+      "An OK status is not a valid constructor argument to StatusOr<T>");
+}
+#endif
+
+TEST(StatusOr, StatusAssignmentFromTypeConvertibleToStatus) {
+  CustomType<MyType, kConvToStatus> v;
+  absl::StatusOr<MyType> statusor;
+  statusor.AssignStatus(v);
+
+  EXPECT_FALSE(statusor.ok());
+  EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/strings/BUILD.bazel b/abseil-cpp/absl/strings/BUILD.bazel
index 64a13ce..819bbe6 100644
--- a/abseil-cpp/absl/strings/BUILD.bazel
+++ b/abseil-cpp/absl/strings/BUILD.bazel
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
     "ABSL_TEST_COPTS",
 )
 
@@ -28,6 +28,20 @@
 licenses(["notice"])
 
 cc_library(
+    name = "string_view",
+    srcs = ["string_view.cc"],
+    hdrs = ["string_view.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:throw_delegate",
+    ],
+)
+
+cc_library(
     name = "strings",
     srcs = [
         "ascii.cc",
@@ -37,23 +51,28 @@
         "internal/charconv_bigint.h",
         "internal/charconv_parse.cc",
         "internal/charconv_parse.h",
+        "internal/damerau_levenshtein_distance.cc",
         "internal/memutil.cc",
         "internal/memutil.h",
         "internal/stl_type_traits.h",
         "internal/str_join_internal.h",
         "internal/str_split_internal.h",
+        "internal/stringify_sink.cc",
+        "internal/stringify_sink.h",
         "match.cc",
         "numbers.cc",
         "str_cat.cc",
         "str_replace.cc",
         "str_split.cc",
-        "string_view.cc",
         "substitute.cc",
     ],
     hdrs = [
         "ascii.h",
         "charconv.h",
         "escaping.h",
+        "internal/damerau_levenshtein_distance.h",
+        "internal/has_absl_stringify.h",
+        "internal/string_constant.h",
         "match.h",
         "numbers.h",
         "str_cat.h",
@@ -65,10 +84,17 @@
         "substitute.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    textual_hdrs = [
+        # string_view.h was once part of :strings, so string_view.h is
+        # re-exported for backwards compatibility.
+        # New code should directly depend on :string_view.
+        "string_view.h",
+    ],
     deps = [
         ":internal",
+        ":string_view",
         "//absl/base",
-        "//absl/base:bits",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:endian",
@@ -76,6 +102,7 @@
         "//absl/base:throw_delegate",
         "//absl/memory",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/numeric:int128",
     ],
 )
@@ -95,6 +122,7 @@
         "internal/utf8.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
@@ -176,6 +204,19 @@
 )
 
 cc_test(
+    name = "damerau_levenshtein_distance_test",
+    size = "small",
+    srcs = [
+        "internal/damerau_levenshtein_distance_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        "//absl/strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "memutil_benchmark",
     srcs = [
         "internal/memutil.h",
@@ -223,12 +264,26 @@
 )
 
 cc_test(
+    name = "string_constant_test",
+    size = "small",
+    srcs = ["internal/string_constant_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":strings",
+        "//absl/meta:type_traits",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "string_view_benchmark",
     srcs = ["string_view_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     tags = ["benchmark"],
     visibility = ["//visibility:private"],
     deps = [
+        ":string_view",
         ":strings",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
@@ -243,7 +298,7 @@
     copts = ABSL_TEST_COPTS,
     visibility = ["//visibility:private"],
     deps = [
-        ":strings",
+        ":string_view",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
@@ -253,14 +308,152 @@
 
 cc_library(
     name = "cord_internal",
-    hdrs = ["internal/cord_internal.h"],
+    srcs = [
+        "internal/cord_internal.cc",
+        "internal/cord_rep_btree.cc",
+        "internal/cord_rep_btree_navigator.cc",
+        "internal/cord_rep_btree_reader.cc",
+        "internal/cord_rep_consume.cc",
+        "internal/cord_rep_crc.cc",
+        "internal/cord_rep_ring.cc",
+    ],
+    hdrs = [
+        "internal/cord_data_edge.h",
+        "internal/cord_internal.h",
+        "internal/cord_rep_btree.h",
+        "internal/cord_rep_btree_navigator.h",
+        "internal/cord_rep_btree_reader.h",
+        "internal/cord_rep_consume.h",
+        "internal/cord_rep_crc.h",
+        "internal/cord_rep_flat.h",
+        "internal/cord_rep_ring.h",
+        "internal/cord_rep_ring_reader.h",
+    ],
     copts = ABSL_DEFAULT_COPTS,
-    visibility = ["//visibility:private"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//visibility:private",
+    ],
     deps = [
         ":strings",
         "//absl/base:base_internal",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:endian",
+        "//absl/base:raw_logging_internal",
+        "//absl/base:throw_delegate",
         "//absl/container:compressed_tuple",
+        "//absl/container:container_memory",
+        "//absl/container:inlined_vector",
+        "//absl/container:layout",
+        "//absl/crc:crc_cord_state",
+        "//absl/functional:function_ref",
         "//absl/meta:type_traits",
+        "//absl/types:span",
+    ],
+)
+
+cc_test(
+    name = "cord_data_edge_test",
+    size = "small",
+    srcs = ["internal/cord_data_edge_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord_internal",
+        ":cord_rep_test_util",
+        ":strings",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cord_rep_btree_test",
+    size = "medium",
+    timeout = "long",
+    srcs = ["internal/cord_rep_btree_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord_internal",
+        ":cord_rep_test_util",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:raw_logging_internal",
+        "//absl/cleanup",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cord_rep_btree_navigator_test",
+    size = "medium",
+    srcs = ["internal/cord_rep_btree_navigator_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord_internal",
+        ":cord_rep_test_util",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:raw_logging_internal",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cord_rep_btree_reader_test",
+    size = "medium",
+    srcs = ["internal/cord_rep_btree_reader_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord",
+        ":cord_internal",
+        ":cord_rep_test_util",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:raw_logging_internal",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cord_rep_crc_test",
+    size = "small",
+    srcs = ["internal/cord_rep_crc_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord_internal",
+        ":cord_rep_test_util",
+        "//absl/base:config",
+        "//absl/crc:crc_cord_state",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "cordz_update_tracker",
+    hdrs = ["internal/cordz_update_tracker.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = ["//absl/base:config"],
+)
+
+cc_test(
+    name = "cordz_update_tracker_test",
+    srcs = ["internal/cordz_update_tracker_test.cc"],
+    deps = [
+        ":cordz_update_tracker",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest_main",
     ],
 )
 
@@ -268,25 +461,255 @@
     name = "cord",
     srcs = [
         "cord.cc",
+        "cord_analysis.cc",
+        "cord_analysis.h",
+        "cord_buffer.cc",
     ],
     hdrs = [
         "cord.h",
+        "cord_buffer.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":cord_internal",
+        ":cordz_functions",
+        ":cordz_info",
+        ":cordz_statistics",
+        ":cordz_update_scope",
+        ":cordz_update_tracker",
         ":internal",
-        ":str_format",
         ":strings",
         "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:endian",
         "//absl/base:raw_logging_internal",
         "//absl/container:fixed_array",
         "//absl/container:inlined_vector",
+        "//absl/crc:crc_cord_state",
         "//absl/functional:function_ref",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/types:optional",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "cordz_handle",
+    srcs = ["internal/cordz_handle.cc"],
+    hdrs = ["internal/cordz_handle.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:raw_logging_internal",
+        "//absl/synchronization",
+    ],
+)
+
+cc_library(
+    name = "cordz_info",
+    srcs = ["internal/cordz_info.cc"],
+    hdrs = ["internal/cordz_info.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":cord_internal",
+        ":cordz_functions",
+        ":cordz_handle",
+        ":cordz_statistics",
+        ":cordz_update_tracker",
+        "//absl/base",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
+        "//absl/container:inlined_vector",
+        "//absl/debugging:stacktrace",
+        "//absl/synchronization",
+        "//absl/time",
+        "//absl/types:span",
+    ],
+)
+
+cc_library(
+    name = "cordz_update_scope",
+    hdrs = ["internal/cordz_update_scope.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":cord_internal",
+        ":cordz_info",
+        ":cordz_update_tracker",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_test(
+    name = "cordz_update_scope_test",
+    srcs = ["internal/cordz_update_scope_test.cc"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":cord_internal",
+        ":cordz_info",
+        ":cordz_test_helpers",
+        ":cordz_update_scope",
+        ":cordz_update_tracker",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "cordz_sample_token",
+    srcs = ["internal/cordz_sample_token.cc"],
+    hdrs = ["internal/cordz_sample_token.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":cordz_handle",
+        ":cordz_info",
+        "//absl/base:config",
+    ],
+)
+
+cc_library(
+    name = "cordz_functions",
+    srcs = ["internal/cordz_functions.cc"],
+    hdrs = ["internal/cordz_functions.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
+        "//absl/profiling:exponential_biased",
+    ],
+)
+
+cc_library(
+    name = "cordz_statistics",
+    hdrs = ["internal/cordz_statistics.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":cordz_update_tracker",
+        "//absl/base:config",
+    ],
+)
+
+cc_test(
+    name = "cordz_functions_test",
+    srcs = [
+        "internal/cordz_functions_test.cc",
+    ],
+    deps = [
+        ":cordz_functions",
+        ":cordz_test_helpers",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cordz_handle_test",
+    srcs = [
+        "internal/cordz_handle_test.cc",
+    ],
+    deps = [
+        ":cordz_handle",
+        "//absl/base:config",
+        "//absl/memory",
+        "//absl/random",
+        "//absl/random:distributions",
+        "//absl/synchronization",
+        "//absl/synchronization:thread_pool",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cordz_info_test",
+    srcs = [
+        "internal/cordz_info_test.cc",
+    ],
+    deps = [
+        ":cord_internal",
+        ":cordz_handle",
+        ":cordz_info",
+        ":cordz_statistics",
+        ":cordz_test_helpers",
+        ":cordz_update_tracker",
+        ":strings",
+        "//absl/base:config",
+        "//absl/debugging:stacktrace",
+        "//absl/debugging:symbolize",
+        "//absl/types:span",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cordz_info_statistics_test",
+    srcs = [
+        "internal/cordz_info_statistics_test.cc",
+    ],
+    deps = [
+        ":cord",
+        ":cord_internal",
+        ":cordz_info",
+        ":cordz_sample_token",
+        ":cordz_statistics",
+        ":cordz_update_scope",
+        ":cordz_update_tracker",
+        "//absl/base:config",
+        "//absl/crc:crc_cord_state",
+        "//absl/synchronization",
+        "//absl/synchronization:thread_pool",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cordz_sample_token_test",
+    srcs = [
+        "internal/cordz_sample_token_test.cc",
+    ],
+    deps = [
+        ":cord_internal",
+        ":cordz_handle",
+        ":cordz_info",
+        ":cordz_sample_token",
+        ":cordz_test_helpers",
+        "//absl/base:config",
+        "//absl/memory",
+        "//absl/random",
+        "//absl/synchronization",
+        "//absl/synchronization:thread_pool",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
     ],
 )
 
@@ -297,8 +720,62 @@
         "cord_test_helpers.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":cord",
+        ":cord_internal",
+        ":strings",
+        "//absl/base:config",
+    ],
+)
+
+cc_library(
+    name = "cord_rep_test_util",
+    testonly = 1,
+    hdrs = ["internal/cord_rep_test_util.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":cord_internal",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:raw_logging_internal",
+    ],
+)
+
+cc_library(
+    name = "cordz_test_helpers",
+    testonly = 1,
+    hdrs = ["cordz_test_helpers.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":cord",
+        ":cord_internal",
+        ":cordz_info",
+        ":cordz_sample_token",
+        ":cordz_statistics",
+        ":cordz_update_tracker",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "cord_buffer_test",
+    size = "small",
+    srcs = ["cord_buffer_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord",
+        ":cord_internal",
+        ":cord_rep_test_util",
+        "//absl/base:config",
+        "//absl/types:span",
+        "@com_google_googletest//:gtest_main",
     ],
 )
 
@@ -311,14 +788,82 @@
     deps = [
         ":cord",
         ":cord_test_helpers",
+        ":cordz_functions",
+        ":cordz_test_helpers",
         ":str_format",
         ":strings",
-        "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:endian",
-        "//absl/base:raw_logging_internal",
         "//absl/container:fixed_array",
+        "//absl/hash",
+        "//absl/log",
+        "//absl/log:check",
+        "//absl/random",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cordz_test",
+    size = "medium",
+    srcs = ["cordz_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    tags = [
+        "benchmark",
+        "no_test_android_arm",
+        "no_test_android_arm64",
+        "no_test_android_x86",
+        "no_test_ios_x86_64",
+        "no_test_lexan",
+        "no_test_loonix",
+    ],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord",
+        ":cord_test_helpers",
+        ":cordz_functions",
+        ":cordz_info",
+        ":cordz_sample_token",
+        ":cordz_statistics",
+        ":cordz_test_helpers",
+        ":cordz_update_tracker",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cord_ring_test",
+    size = "medium",
+    srcs = ["cord_ring_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord_internal",
+        ":strings",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
+        "//absl/debugging:leak_check",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "cord_ring_reader_test",
+    size = "medium",
+    srcs = ["cord_ring_reader_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":cord_internal",
+        ":strings",
+        "//absl/base:core_headers",
+        "//absl/debugging:leak_check",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -370,6 +915,7 @@
         ":strings",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
+        "//absl/container:btree",
         "//absl/container:flat_hash_map",
         "//absl/container:node_hash_map",
         "@com_google_googletest//:gtest_main",
@@ -462,8 +1008,8 @@
     copts = ABSL_TEST_COPTS,
     visibility = ["//visibility:private"],
     deps = [
+        ":str_format",
         ":strings",
-        "//absl/base:core_headers",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -494,7 +1040,7 @@
         ":pow10_helper",
         ":strings",
         "//absl/base:config",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/random",
         "//absl/random:distributions",
         "@com_google_googletest//:gtest_main",
@@ -571,7 +1117,7 @@
     deps = [
         ":strings",
         "//absl/base:config",
-        "//absl/base:raw_logging_internal",
+        "//absl/log:check",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -611,6 +1157,7 @@
         "str_format.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":str_format_internal",
     ],
@@ -630,23 +1177,28 @@
         "internal/str_format/arg.h",
         "internal/str_format/bind.h",
         "internal/str_format/checker.h",
+        "internal/str_format/constexpr_parser.h",
         "internal/str_format/extension.h",
         "internal/str_format/float_conversion.h",
         "internal/str_format/output.h",
         "internal/str_format/parser.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = ["//visibility:private"],
     deps = [
         ":strings",
-        "//absl/base:bits",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/container:inlined_vector",
         "//absl/functional:function_ref",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/numeric:int128",
+        "//absl/numeric:representation",
         "//absl/types:optional",
         "//absl/types:span",
+        "//absl/utility",
     ],
 )
 
@@ -659,7 +1211,6 @@
         ":cord",
         ":str_format",
         ":strings",
-        "//absl/base:core_headers",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -722,7 +1273,9 @@
     deps = [
         ":str_format_internal",
         ":strings",
+        "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/types:optional",
         "@com_google_googletest//:gtest_main",
     ],
@@ -757,6 +1310,7 @@
     testonly = True,
     srcs = ["internal/pow10_helper.cc"],
     hdrs = ["internal/pow10_helper.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = ["//visibility:private"],
     deps = ["//absl/base:config"],
 )
@@ -772,3 +1326,30 @@
         "@com_google_googletest//:gtest_main",
     ],
 )
+
+cc_binary(
+    name = "atod_manual_test",
+    testonly = 1,
+    srcs = ["atod_manual_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:private"],
+    deps = [
+        ":str_format",
+        ":strings",
+        "//absl/base",
+        "//absl/types:optional",
+    ],
+)
+
+cc_test(
+    name = "char_formatting_test",
+    srcs = [
+        "char_formatting_test.cc",
+    ],
+    deps = [
+        ":str_format",
+        ":strings",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
diff --git a/abseil-cpp/absl/strings/CMakeLists.txt b/abseil-cpp/absl/strings/CMakeLists.txt
index d6c2126..1959dc9 100644
--- a/abseil-cpp/absl/strings/CMakeLists.txt
+++ b/abseil-cpp/absl/strings/CMakeLists.txt
@@ -16,18 +16,37 @@
 
 absl_cc_library(
   NAME
+    string_view
+  HDRS
+    string_view.h
+  SRCS
+    string_view.cc
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::throw_delegate
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
     strings
   HDRS
     "ascii.h"
     "charconv.h"
     "escaping.h"
+    "internal/damerau_levenshtein_distance.h"
+    "internal/string_constant.h"
+    "internal/has_absl_stringify.h"
     "match.h"
     "numbers.h"
     "str_cat.h"
     "str_join.h"
     "str_replace.h"
     "str_split.h"
-    "string_view.h"
     "strip.h"
     "substitute.h"
   SRCS
@@ -38,8 +57,11 @@
     "internal/charconv_bigint.h"
     "internal/charconv_parse.cc"
     "internal/charconv_parse.h"
+    "internal/damerau_levenshtein_distance.cc"
     "internal/memutil.cc"
     "internal/memutil.h"
+    "internal/stringify_sink.h"
+    "internal/stringify_sink.cc"
     "internal/stl_type_traits.h"
     "internal/str_join_internal.h"
     "internal/str_split_internal.h"
@@ -48,11 +70,11 @@
     "str_cat.cc"
     "str_replace.cc"
     "str_split.cc"
-    "string_view.cc"
     "substitute.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::string_view
     absl::strings_internal
     absl::base
     absl::bits
@@ -67,6 +89,7 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     strings_internal
@@ -100,7 +123,7 @@
   DEPS
     absl::strings
     absl::base
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -114,7 +137,7 @@
     absl::strings
     absl::core_headers
     absl::fixed_array
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -127,7 +150,20 @@
   DEPS
     absl::strings
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    damerau_levenshtein_distance_test
+  SRCS
+    "internal/damerau_levenshtein_distance_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::strings
+    absl::base
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -141,7 +177,7 @@
   DEPS
     absl::strings
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -155,7 +191,20 @@
     absl::strings_internal
     absl::base
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    string_constant_test
+  SRCS
+    "internal/string_constant_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::strings
+    absl::type_traits
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -170,7 +219,7 @@
     absl::config
     absl::core_headers
     absl::dynamic_annotations
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -183,7 +232,7 @@
   DEPS
     absl::strings
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -195,7 +244,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -207,12 +256,12 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::strings
-    absl::base
     absl::core_headers
     absl::dynamic_annotations
+    absl::btree
     absl::flat_hash_map
     absl::node_hash_map
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -224,7 +273,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::strings_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -239,7 +288,7 @@
     absl::base
     absl::core_headers
     absl::type_traits
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -254,7 +303,7 @@
     absl::base
     absl::core_headers
     absl::memory
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -266,8 +315,9 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::strings
+    absl::str_format
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -283,11 +333,11 @@
     absl::core_headers
     absl::pow10_helper
     absl::config
-    absl::raw_logging_internal
+    absl::log
     absl::random_random
     absl::random_distributions
     absl::strings_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -300,7 +350,7 @@
   DEPS
     absl::strings
     absl::base
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -312,7 +362,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::strings_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -326,7 +376,7 @@
     absl::strings
     absl::str_format
     absl::pow10_helper
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -338,10 +388,10 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
-    absl::strings
+    absl::check
     absl::config
-    absl::raw_logging_internal
-    gmock_main
+    absl::strings
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -356,7 +406,7 @@
   DEPS
     absl::strings
     absl::config
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -371,6 +421,7 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     str_format_internal
@@ -378,6 +429,7 @@
     "internal/str_format/arg.h"
     "internal/str_format/bind.h"
     "internal/str_format/checker.h"
+    "internal/str_format/constexpr_parser.h"
     "internal/str_format/extension.h"
     "internal/str_format/float_conversion.h"
     "internal/str_format/output.h"
@@ -396,7 +448,10 @@
     absl::strings
     absl::config
     absl::core_headers
+    absl::inlined_vector
+    absl::numeric_representation
     absl::type_traits
+    absl::utility
     absl::int128
     absl::span
 )
@@ -413,7 +468,7 @@
     absl::cord
     absl::strings
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -427,7 +482,7 @@
     absl::str_format
     absl::str_format_internal
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -440,7 +495,7 @@
   DEPS
     absl::str_format
     absl::str_format_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -452,7 +507,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::str_format_internal
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -464,7 +519,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::str_format
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -477,9 +532,11 @@
   DEPS
     absl::strings
     absl::str_format_internal
+    absl::core_headers
+    absl::log
     absl::raw_logging_internal
     absl::int128
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -492,7 +549,7 @@
   DEPS
     absl::str_format_internal
     absl::cord
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -505,9 +562,24 @@
   DEPS
     absl::str_format_internal
     absl::core_headers
-    gmock_main
+    GTest::gmock_main
 )
 
+absl_cc_test(
+  NAME
+    char_formatting_test
+  SRCS
+    "char_formatting_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::str_format
+    absl::strings
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     pow10_helper
@@ -532,7 +604,296 @@
   DEPS
     absl::pow10_helper
     absl::str_format
-    gmock_main
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cord_internal
+  HDRS
+    "internal/cord_data_edge.h"
+    "internal/cord_internal.h"
+    "internal/cord_rep_btree.h"
+    "internal/cord_rep_btree_navigator.h"
+    "internal/cord_rep_btree_reader.h"
+    "internal/cord_rep_crc.h"
+    "internal/cord_rep_consume.h"
+    "internal/cord_rep_flat.h"
+    "internal/cord_rep_ring.h"
+    "internal/cord_rep_ring_reader.h"
+  SRCS
+    "internal/cord_internal.cc"
+    "internal/cord_rep_btree.cc"
+    "internal/cord_rep_btree_navigator.cc"
+    "internal/cord_rep_btree_reader.cc"
+    "internal/cord_rep_crc.cc"
+    "internal/cord_rep_consume.cc"
+    "internal/cord_rep_ring.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base_internal
+    absl::compressed_tuple
+    absl::config
+    absl::container_memory
+    absl::core_headers
+    absl::crc_cord_state
+    absl::endian
+    absl::inlined_vector
+    absl::layout
+    absl::raw_logging_internal
+    absl::strings
+    absl::throw_delegate
+    absl::type_traits
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_update_tracker
+  HDRS
+    "internal/cordz_update_tracker.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+)
+
+absl_cc_test(
+  NAME
+    cordz_update_tracker_test
+  SRCS
+    "internal/cordz_update_tracker_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cordz_update_tracker
+    absl::core_headers
+    absl::synchronization
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_functions
+  HDRS
+    "internal/cordz_functions.h"
+  SRCS
+    "internal/cordz_functions.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::exponential_biased
+    absl::raw_logging_internal
+)
+
+absl_cc_test(
+  NAME
+    cordz_functions_test
+  SRCS
+    "internal/cordz_functions_test.cc"
+  DEPS
+    absl::config
+    absl::cordz_functions
+    absl::cordz_test_helpers
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_statistics
+  HDRS
+    "internal/cordz_statistics.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::cordz_update_tracker
+    absl::synchronization
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_handle
+  HDRS
+    "internal/cordz_handle.h"
+  SRCS
+    "internal/cordz_handle.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::raw_logging_internal
+    absl::synchronization
+)
+
+absl_cc_test(
+  NAME
+    cordz_handle_test
+  SRCS
+    "internal/cordz_handle_test.cc"
+  DEPS
+    absl::config
+    absl::cordz_handle
+    absl::cordz_test_helpers
+    absl::memory
+    absl::random_random
+    absl::random_distributions
+    absl::synchronization
+    absl::time
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_info
+  HDRS
+    "internal/cordz_info.h"
+  SRCS
+    "internal/cordz_info.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::cord_internal
+    absl::cordz_functions
+    absl::cordz_handle
+    absl::cordz_statistics
+    absl::cordz_update_tracker
+    absl::core_headers
+    absl::inlined_vector
+    absl::span
+    absl::raw_logging_internal
+    absl::stacktrace
+    absl::synchronization
+    absl::time
+)
+
+absl_cc_test(
+  NAME
+    cordz_info_test
+  SRCS
+    "internal/cordz_info_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord_internal
+    absl::cordz_test_helpers
+    absl::cordz_handle
+    absl::cordz_info
+    absl::cordz_statistics
+    absl::cordz_test_helpers
+    absl::cordz_update_tracker
+    absl::span
+    absl::stacktrace
+    absl::symbolize
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cordz_info_statistics_test
+  SRCS
+    "internal/cordz_info_statistics_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord
+    absl::cord_internal
+    absl::cordz_info
+    absl::cordz_sample_token
+    absl::cordz_statistics
+    absl::cordz_update_scope
+    absl::cordz_update_tracker
+    absl::crc_cord_state
+    absl::thread_pool
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_sample_token
+  HDRS
+    "internal/cordz_sample_token.h"
+  SRCS
+    "internal/cordz_sample_token.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::cordz_handle
+    absl::cordz_info
+)
+
+absl_cc_test(
+  NAME
+    cordz_sample_token_test
+  SRCS
+    "internal/cordz_sample_token_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord_internal
+    absl::cordz_handle
+    absl::cordz_info
+    absl::cordz_info
+    absl::cordz_sample_token
+    absl::cordz_test_helpers
+    absl::memory
+    absl::random_random
+    absl::synchronization
+    absl::thread_pool
+    absl::time
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_update_scope
+  HDRS
+    "internal/cordz_update_scope.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::cord_internal
+    absl::cordz_info
+    absl::cordz_update_tracker
+    absl::core_headers
+)
+
+absl_cc_test(
+  NAME
+    cordz_update_scope_test
+  SRCS
+    "internal/cordz_update_scope_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord_internal
+    absl::cordz_info
+    absl::cordz_test_helpers
+    absl::cordz_update_scope
+    absl::cordz_update_tracker
+    absl::core_headers
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -540,28 +901,52 @@
     cord
   HDRS
     "cord.h"
+    "cord_buffer.h"
   SRCS
     "cord.cc"
-    "internal/cord_internal.h"
+    "cord_analysis.cc"
+    "cord_analysis.h"
+    "cord_buffer.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::base
-    absl::base_internal
-    absl::compressed_tuple
+    absl::config
+    absl::cord_internal
+    absl::cordz_functions
+    absl::cordz_info
+    absl::cordz_update_scope
+    absl::cordz_update_tracker
     absl::core_headers
+    absl::crc_cord_state
     absl::endian
     absl::fixed_array
     absl::function_ref
     absl::inlined_vector
     absl::optional
     absl::raw_logging_internal
+    absl::span
     absl::strings
-    absl::strings_internal
     absl::type_traits
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cord_rep_test_util
+  HDRS
+    "internal/cord_rep_test_util.h"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord_internal
+    absl::raw_logging_internal
+    absl::strings
+  TESTONLY
+)
+
 absl_cc_library(
   NAME
     cord_test_helpers
@@ -570,7 +955,32 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::config
     absl::cord
+    absl::cord_internal
+    absl::strings
+  TESTONLY
+  PUBLIC
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    cordz_test_helpers
+  HDRS
+    "cordz_test_helpers.h"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord
+    absl::cord_internal
+    absl::cordz_info
+    absl::cordz_sample_token
+    absl::cordz_statistics
+    absl::cordz_update_tracker
+    absl::core_headers
+    absl::strings
   TESTONLY
 )
 
@@ -582,14 +992,162 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::base
+    absl::check
+    absl::config
     absl::cord
+    absl::cord_test_helpers
+    absl::cordz_test_helpers
+    absl::core_headers
+    absl::endian
+    absl::fixed_array
+    absl::hash
+    absl::log
+    absl::random_random
     absl::str_format
     absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_data_edge_test
+  SRCS
+    "internal/cord_data_edge_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::cord_internal
+    absl::cord_rep_test_util
+    absl::core_headers
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_rep_btree_test
+  SRCS
+    "internal/cord_rep_btree_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::cleanup
+    absl::config
+    absl::cord_internal
+    absl::cord_rep_test_util
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_rep_btree_navigator_test
+  SRCS
+    "internal/cord_rep_btree_navigator_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::cord_internal
+    absl::cord_rep_test_util
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_rep_btree_reader_test
+  SRCS
+    "internal/cord_rep_btree_reader_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::cord_internal
+    absl::cord_rep_test_util
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_rep_crc_test
+  SRCS
+    "internal/cord_rep_crc_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::cord_internal
+    absl::cord_rep_test_util
+    absl::crc_cord_state
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_ring_test
+  SRCS
+    "cord_ring_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::cord_internal
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cord_ring_reader_test
+  SRCS
+    "cord_ring_reader_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::cord_internal
+    absl::core_headers
+    absl::strings
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    cordz_test
+  SRCS
+    "cordz_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::cord
+    absl::cord_test_helpers
+    absl::cordz_test_helpers
+    absl::cordz_functions
+    absl::cordz_info
+    absl::cordz_sample_token
+    absl::cordz_statistics
+    absl::cordz_update_tracker
     absl::base
     absl::config
     absl::core_headers
-    absl::endian
     absl::raw_logging_internal
-    absl::fixed_array
-    gmock_main
+    absl::strings
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/strings/ascii.cc b/abseil-cpp/absl/strings/ascii.cc
index 93bb03e..16c9689 100644
--- a/abseil-cpp/absl/strings/ascii.cc
+++ b/abseil-cpp/absl/strings/ascii.cc
@@ -14,6 +14,10 @@
 
 #include "absl/strings/ascii.h"
 
+#include <climits>
+#include <cstring>
+#include <string>
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace ascii_internal {
@@ -153,18 +157,62 @@
 };
 // clang-format on
 
-}  // namespace ascii_internal
+template <bool ToUpper>
+constexpr void AsciiStrCaseFold(char* p, char* end) {
+  // The upper- and lowercase versions of ASCII characters differ by only 1 bit.
+  // When we need to flip the case, we can xor with this bit to achieve the
+  // desired result. Note that the choice of 'a' and 'A' here is arbitrary. We
+  // could have chosen 'z' and 'Z', or any other pair of characters as they all
+  // have the same single bit difference.
+  constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';
 
-void AsciiStrToLower(std::string* s) {
-  for (auto& ch : *s) {
-    ch = absl::ascii_tolower(ch);
+  constexpr char ch_a = ToUpper ? 'a' : 'A';
+  constexpr char ch_z = ToUpper ? 'z' : 'Z';
+  for (; p < end; ++p) {
+    unsigned char v = static_cast<unsigned char>(*p);
+    // We use & instead of && to ensure this always stays branchless
+    // We use static_cast<int> to suppress -Wbitwise-instead-of-logical
+    bool is_in_range = static_cast<bool>(static_cast<int>(ch_a <= v) &
+                                         static_cast<int>(v <= ch_z));
+    v ^= is_in_range ? kAsciiCaseBitFlip : 0;
+    *p = static_cast<char>(v);
   }
 }
 
-void AsciiStrToUpper(std::string* s) {
-  for (auto& ch : *s) {
-    ch = absl::ascii_toupper(ch);
+static constexpr size_t ValidateAsciiCasefold() {
+  constexpr size_t num_chars = 1 + CHAR_MAX - CHAR_MIN;
+  size_t incorrect_index = 0;
+  char lowered[num_chars] = {};
+  char uppered[num_chars] = {};
+  for (unsigned int i = 0; i < num_chars; ++i) {
+    uppered[i] = lowered[i] = static_cast<char>(i);
   }
+  AsciiStrCaseFold<false>(&lowered[0], &lowered[num_chars]);
+  AsciiStrCaseFold<true>(&uppered[0], &uppered[num_chars]);
+  for (size_t i = 0; i < num_chars; ++i) {
+    const char ch = static_cast<char>(i),
+               ch_upper = ('a' <= ch && ch <= 'z' ? 'A' + (ch - 'a') : ch),
+               ch_lower = ('A' <= ch && ch <= 'Z' ? 'a' + (ch - 'A') : ch);
+    if (uppered[i] != ch_upper || lowered[i] != ch_lower) {
+      incorrect_index = i > 0 ? i : num_chars;
+      break;
+    }
+  }
+  return incorrect_index;
+}
+
+static_assert(ValidateAsciiCasefold() == 0, "error in case conversion");
+
+}  // namespace ascii_internal
+
+void AsciiStrToLower(std::string* s) {
+  char* p = &(*s)[0];  // Guaranteed to be valid for empty strings
+  return ascii_internal::AsciiStrCaseFold<false>(p, p + s->size());
+}
+
+void AsciiStrToUpper(std::string* s) {
+  char* p = &(*s)[0];  // Guaranteed to be valid for empty strings
+  return ascii_internal::AsciiStrCaseFold<true>(p, p + s->size());
 }
 
 void RemoveExtraAsciiWhitespace(std::string* str) {
@@ -183,17 +231,17 @@
   for (; input_it < input_end; ++input_it) {
     if (is_ws) {
       // Consecutive whitespace?  Keep only the last.
-      is_ws = absl::ascii_isspace(*input_it);
+      is_ws = absl::ascii_isspace(static_cast<unsigned char>(*input_it));
       if (is_ws) --output_it;
     } else {
-      is_ws = absl::ascii_isspace(*input_it);
+      is_ws = absl::ascii_isspace(static_cast<unsigned char>(*input_it));
     }
 
     *output_it = *input_it;
     ++output_it;
   }
 
-  str->erase(output_it - &(*str)[0]);
+  str->erase(static_cast<size_t>(output_it - &(*str)[0]));
 }
 
 ABSL_NAMESPACE_END
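
The rewritten AsciiStrCaseFold above relies on upper- and lowercase ASCII letters differing by exactly one bit, so a single XOR with 'a' ^ 'A' (0x20) flips case, and the branchless range test decides whether to apply it. A minimal standalone sketch of the same trick follows; ToLowerSketch is a hypothetical helper name for illustration, not part of this patch or of Abseil's API.

#include <cassert>
#include <string>

// Hypothetical helper: lowercases in place using the same single-bit XOR
// trick as the patch's AsciiStrCaseFold<false>.
inline void ToLowerSketch(std::string& s) {
  constexpr unsigned char kCaseBit = 'a' ^ 'A';  // 0x20, the only differing bit
  for (char& c : s) {
    const unsigned char v = static_cast<unsigned char>(c);
    // Branchless range check, mirroring the patch: only 'A'..'Z' gets flipped.
    const bool is_upper = static_cast<bool>(static_cast<int>('A' <= v) &
                                            static_cast<int>(v <= 'Z'));
    c = static_cast<char>(v ^ (is_upper ? kCaseBit : 0));
  }
}

int main() {
  std::string s = "Hello, WORLD! @[";  // '@' (0x40) and '[' (0x5B) flank 'A'..'Z'
  ToLowerSketch(s);
  assert(s == "hello, world! @[");     // letters flipped, punctuation untouched
  return 0;
}
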
diff --git a/abseil-cpp/absl/strings/ascii.h b/abseil-cpp/absl/strings/ascii.h
index b46bc71..42eadae 100644
--- a/abseil-cpp/absl/strings/ascii.h
+++ b/abseil-cpp/absl/strings/ascii.h
@@ -133,7 +133,7 @@
 
 // ascii_isprint()
 //
-// Determines whether the given character is printable, including whitespace.
+// Determines whether the given character is printable, including spaces.
 inline bool ascii_isprint(unsigned char c) { return c >= 32 && c < 127; }
 
 // ascii_isgraph()
@@ -197,7 +197,7 @@
 ABSL_MUST_USE_RESULT inline absl::string_view StripLeadingAsciiWhitespace(
     absl::string_view str) {
   auto it = std::find_if_not(str.begin(), str.end(), absl::ascii_isspace);
-  return str.substr(it - str.begin());
+  return str.substr(static_cast<size_t>(it - str.begin()));
 }
 
 // Strips in place whitespace from the beginning of the given string.
@@ -211,13 +211,13 @@
 ABSL_MUST_USE_RESULT inline absl::string_view StripTrailingAsciiWhitespace(
     absl::string_view str) {
   auto it = std::find_if_not(str.rbegin(), str.rend(), absl::ascii_isspace);
-  return str.substr(0, str.rend() - it);
+  return str.substr(0, static_cast<size_t>(str.rend() - it));
 }
 
 // Strips in place whitespace from the end of the given string
 inline void StripTrailingAsciiWhitespace(std::string* str) {
   auto it = std::find_if_not(str->rbegin(), str->rend(), absl::ascii_isspace);
-  str->erase(str->rend() - it);
+  str->erase(static_cast<size_t>(str->rend() - it));
 }
 
 // Returns absl::string_view with whitespace stripped from both ends of the
diff --git a/abseil-cpp/absl/strings/ascii_test.cc b/abseil-cpp/absl/strings/ascii_test.cc
index 5ecd23f..4ea262f 100644
--- a/abseil-cpp/absl/strings/ascii_test.cc
+++ b/abseil-cpp/absl/strings/ascii_test.cc
@@ -14,6 +14,7 @@
 
 #include "absl/strings/ascii.h"
 
+#include <algorithm>
 #include <cctype>
 #include <clocale>
 #include <cstring>
@@ -27,103 +28,99 @@
 
 TEST(AsciiIsFoo, All) {
   for (int i = 0; i < 256; i++) {
-    if ((i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z'))
-      EXPECT_TRUE(absl::ascii_isalpha(i)) << ": failed on " << i;
+    const auto c = static_cast<unsigned char>(i);
+    if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
+      EXPECT_TRUE(absl::ascii_isalpha(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isalpha(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isalpha(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
-    if ((i >= '0' && i <= '9'))
-      EXPECT_TRUE(absl::ascii_isdigit(i)) << ": failed on " << i;
+    const auto c = static_cast<unsigned char>(i);
+    if ((c >= '0' && c <= '9'))
+      EXPECT_TRUE(absl::ascii_isdigit(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isdigit(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isdigit(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
-    if (absl::ascii_isalpha(i) || absl::ascii_isdigit(i))
-      EXPECT_TRUE(absl::ascii_isalnum(i)) << ": failed on " << i;
+    const auto c = static_cast<unsigned char>(i);
+    if (absl::ascii_isalpha(c) || absl::ascii_isdigit(c))
+      EXPECT_TRUE(absl::ascii_isalnum(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isalnum(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isalnum(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i != '\0' && strchr(" \r\n\t\v\f", i))
-      EXPECT_TRUE(absl::ascii_isspace(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_isspace(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isspace(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isspace(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i >= 32 && i < 127)
-      EXPECT_TRUE(absl::ascii_isprint(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_isprint(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isprint(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isprint(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
-    if (absl::ascii_isprint(i) && !absl::ascii_isspace(i) &&
-        !absl::ascii_isalnum(i))
-      EXPECT_TRUE(absl::ascii_ispunct(i)) << ": failed on " << i;
-    else
-      EXPECT_TRUE(!absl::ascii_ispunct(i)) << ": failed on " << i;
+    const auto c = static_cast<unsigned char>(i);
+    if (absl::ascii_isprint(c) && !absl::ascii_isspace(c) &&
+        !absl::ascii_isalnum(c)) {
+      EXPECT_TRUE(absl::ascii_ispunct(c)) << ": failed on " << c;
+    } else {
+      EXPECT_TRUE(!absl::ascii_ispunct(c)) << ": failed on " << c;
+    }
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i == ' ' || i == '\t')
-      EXPECT_TRUE(absl::ascii_isblank(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_isblank(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isblank(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isblank(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i < 32 || i == 127)
-      EXPECT_TRUE(absl::ascii_iscntrl(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_iscntrl(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_iscntrl(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_iscntrl(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
-    if (absl::ascii_isdigit(i) || (i >= 'A' && i <= 'F') ||
-        (i >= 'a' && i <= 'f'))
-      EXPECT_TRUE(absl::ascii_isxdigit(i)) << ": failed on " << i;
-    else
-      EXPECT_TRUE(!absl::ascii_isxdigit(i)) << ": failed on " << i;
+    const auto c = static_cast<unsigned char>(i);
+    if (absl::ascii_isdigit(c) || (i >= 'A' && i <= 'F') ||
+        (i >= 'a' && i <= 'f')) {
+      EXPECT_TRUE(absl::ascii_isxdigit(c)) << ": failed on " << c;
+    } else {
+      EXPECT_TRUE(!absl::ascii_isxdigit(c)) << ": failed on " << c;
+    }
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i > 32 && i < 127)
-      EXPECT_TRUE(absl::ascii_isgraph(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_isgraph(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isgraph(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isgraph(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i >= 'A' && i <= 'Z')
-      EXPECT_TRUE(absl::ascii_isupper(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_isupper(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_isupper(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_isupper(c)) << ": failed on " << c;
   }
   for (int i = 0; i < 256; i++) {
+    const auto c = static_cast<unsigned char>(i);
     if (i >= 'a' && i <= 'z')
-      EXPECT_TRUE(absl::ascii_islower(i)) << ": failed on " << i;
+      EXPECT_TRUE(absl::ascii_islower(c)) << ": failed on " << c;
     else
-      EXPECT_TRUE(!absl::ascii_islower(i)) << ": failed on " << i;
+      EXPECT_TRUE(!absl::ascii_islower(c)) << ": failed on " << c;
   }
-  for (int i = 0; i < 128; i++) {
-    EXPECT_TRUE(absl::ascii_isascii(i)) << ": failed on " << i;
+  for (unsigned char c = 0; c < 128; c++) {
+    EXPECT_TRUE(absl::ascii_isascii(c)) << ": failed on " << c;
   }
   for (int i = 128; i < 256; i++) {
-    EXPECT_TRUE(!absl::ascii_isascii(i)) << ": failed on " << i;
-  }
-
-  // The official is* functions don't accept negative signed chars, but
-  // our absl::ascii_is* functions do.
-  for (int i = 0; i < 256; i++) {
-    signed char sc = static_cast<signed char>(static_cast<unsigned char>(i));
-    EXPECT_EQ(absl::ascii_isalpha(i), absl::ascii_isalpha(sc)) << i;
-    EXPECT_EQ(absl::ascii_isdigit(i), absl::ascii_isdigit(sc)) << i;
-    EXPECT_EQ(absl::ascii_isalnum(i), absl::ascii_isalnum(sc)) << i;
-    EXPECT_EQ(absl::ascii_isspace(i), absl::ascii_isspace(sc)) << i;
-    EXPECT_EQ(absl::ascii_ispunct(i), absl::ascii_ispunct(sc)) << i;
-    EXPECT_EQ(absl::ascii_isblank(i), absl::ascii_isblank(sc)) << i;
-    EXPECT_EQ(absl::ascii_iscntrl(i), absl::ascii_iscntrl(sc)) << i;
-    EXPECT_EQ(absl::ascii_isxdigit(i), absl::ascii_isxdigit(sc)) << i;
-    EXPECT_EQ(absl::ascii_isprint(i), absl::ascii_isprint(sc)) << i;
-    EXPECT_EQ(absl::ascii_isgraph(i), absl::ascii_isgraph(sc)) << i;
-    EXPECT_EQ(absl::ascii_isupper(i), absl::ascii_isupper(sc)) << i;
-    EXPECT_EQ(absl::ascii_islower(i), absl::ascii_islower(sc)) << i;
-    EXPECT_EQ(absl::ascii_isascii(i), absl::ascii_isascii(sc)) << i;
+    const auto c = static_cast<unsigned char>(i);
+    EXPECT_TRUE(!absl::ascii_isascii(c)) << ": failed on " << c;
   }
 }
 
@@ -137,19 +134,20 @@
 #endif
 
   for (int i = 0; i < 256; i++) {
-    EXPECT_EQ(isalpha(i) != 0, absl::ascii_isalpha(i)) << i;
-    EXPECT_EQ(isdigit(i) != 0, absl::ascii_isdigit(i)) << i;
-    EXPECT_EQ(isalnum(i) != 0, absl::ascii_isalnum(i)) << i;
-    EXPECT_EQ(isspace(i) != 0, absl::ascii_isspace(i)) << i;
-    EXPECT_EQ(ispunct(i) != 0, absl::ascii_ispunct(i)) << i;
-    EXPECT_EQ(isblank(i) != 0, absl::ascii_isblank(i)) << i;
-    EXPECT_EQ(iscntrl(i) != 0, absl::ascii_iscntrl(i)) << i;
-    EXPECT_EQ(isxdigit(i) != 0, absl::ascii_isxdigit(i)) << i;
-    EXPECT_EQ(isprint(i) != 0, absl::ascii_isprint(i)) << i;
-    EXPECT_EQ(isgraph(i) != 0, absl::ascii_isgraph(i)) << i;
-    EXPECT_EQ(isupper(i) != 0, absl::ascii_isupper(i)) << i;
-    EXPECT_EQ(islower(i) != 0, absl::ascii_islower(i)) << i;
-    EXPECT_EQ(isascii(i) != 0, absl::ascii_isascii(i)) << i;
+    const auto c = static_cast<unsigned char>(i);
+    EXPECT_EQ(isalpha(c) != 0, absl::ascii_isalpha(c)) << c;
+    EXPECT_EQ(isdigit(c) != 0, absl::ascii_isdigit(c)) << c;
+    EXPECT_EQ(isalnum(c) != 0, absl::ascii_isalnum(c)) << c;
+    EXPECT_EQ(isspace(c) != 0, absl::ascii_isspace(c)) << c;
+    EXPECT_EQ(ispunct(c) != 0, absl::ascii_ispunct(c)) << c;
+    EXPECT_EQ(isblank(c) != 0, absl::ascii_isblank(c)) << c;
+    EXPECT_EQ(iscntrl(c) != 0, absl::ascii_iscntrl(c)) << c;
+    EXPECT_EQ(isxdigit(c) != 0, absl::ascii_isxdigit(c)) << c;
+    EXPECT_EQ(isprint(c) != 0, absl::ascii_isprint(c)) << c;
+    EXPECT_EQ(isgraph(c) != 0, absl::ascii_isgraph(c)) << c;
+    EXPECT_EQ(isupper(c) != 0, absl::ascii_isupper(c)) << c;
+    EXPECT_EQ(islower(c) != 0, absl::ascii_islower(c)) << c;
+    EXPECT_EQ(isascii(c) != 0, absl::ascii_isascii(c)) << c;
   }
 
 #ifndef __ANDROID__
@@ -166,25 +164,20 @@
 #endif
 
   for (int i = 0; i < 256; i++) {
-    if (absl::ascii_islower(i))
-      EXPECT_EQ(absl::ascii_toupper(i), 'A' + (i - 'a')) << i;
+    const auto c = static_cast<unsigned char>(i);
+    if (absl::ascii_islower(c))
+      EXPECT_EQ(absl::ascii_toupper(c), 'A' + (i - 'a')) << c;
     else
-      EXPECT_EQ(absl::ascii_toupper(i), static_cast<char>(i)) << i;
+      EXPECT_EQ(absl::ascii_toupper(c), static_cast<char>(i)) << c;
 
-    if (absl::ascii_isupper(i))
-      EXPECT_EQ(absl::ascii_tolower(i), 'a' + (i - 'A')) << i;
+    if (absl::ascii_isupper(c))
+      EXPECT_EQ(absl::ascii_tolower(c), 'a' + (i - 'A')) << c;
     else
-      EXPECT_EQ(absl::ascii_tolower(i), static_cast<char>(i)) << i;
+      EXPECT_EQ(absl::ascii_tolower(c), static_cast<char>(i)) << c;
 
     // These CHECKs only hold in a C locale.
-    EXPECT_EQ(static_cast<char>(tolower(i)), absl::ascii_tolower(i)) << i;
-    EXPECT_EQ(static_cast<char>(toupper(i)), absl::ascii_toupper(i)) << i;
-
-    // The official to* functions don't accept negative signed chars, but
-    // our absl::ascii_to* functions do.
-    signed char sc = static_cast<signed char>(static_cast<unsigned char>(i));
-    EXPECT_EQ(absl::ascii_tolower(i), absl::ascii_tolower(sc)) << i;
-    EXPECT_EQ(absl::ascii_toupper(i), absl::ascii_toupper(sc)) << i;
+    EXPECT_EQ(static_cast<char>(tolower(i)), absl::ascii_tolower(c)) << c;
+    EXPECT_EQ(static_cast<char>(toupper(i)), absl::ascii_toupper(c)) << c;
   }
 #ifndef __ANDROID__
   // restore the old locale.
@@ -197,11 +190,15 @@
   const std::string str("GHIJKL");
   const std::string str2("MNOPQR");
   const absl::string_view sp(str2);
+  std::string mutable_str("_`?@[{AMNOPQRSTUVWXYZ");
 
   EXPECT_EQ("abcdef", absl::AsciiStrToLower(buf));
   EXPECT_EQ("ghijkl", absl::AsciiStrToLower(str));
   EXPECT_EQ("mnopqr", absl::AsciiStrToLower(sp));
 
+  absl::AsciiStrToLower(&mutable_str);
+  EXPECT_EQ("_`?@[{amnopqrstuvwxyz", mutable_str);
+
   char mutable_buf[] = "Mutable";
   std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
                  mutable_buf, absl::ascii_tolower);
@@ -211,12 +208,12 @@
 TEST(AsciiStrTo, Upper) {
   const char buf[] = "abcdef";
   const std::string str("ghijkl");
-  const std::string str2("mnopqr");
+  const std::string str2("_`?@[{amnopqrstuvwxyz");
   const absl::string_view sp(str2);
 
   EXPECT_EQ("ABCDEF", absl::AsciiStrToUpper(buf));
   EXPECT_EQ("GHIJKL", absl::AsciiStrToUpper(str));
-  EXPECT_EQ("MNOPQR", absl::AsciiStrToUpper(sp));
+  EXPECT_EQ("_`?@[{AMNOPQRSTUVWXYZ", absl::AsciiStrToUpper(sp));
 
   char mutable_buf[] = "Mutable";
   std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
diff --git a/abseil-cpp/absl/strings/atod_manual_test.cc b/abseil-cpp/absl/strings/atod_manual_test.cc
new file mode 100644
index 0000000..6cf28b0
--- /dev/null
+++ b/abseil-cpp/absl/strings/atod_manual_test.cc
@@ -0,0 +1,193 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This program tests the absl::SimpleAtod and absl::SimpleAtof functions. Run
+// it as "atod_manual_test pnftd/data/*.txt" where the pnftd directory is a
+// local checkout of the https://github.com/nigeltao/parse-number-fxx-test-data
+// repository. The test suite lives in a separate repository because its more
+// than 5 million test cases weigh over several hundred megabytes and because
+// the test cases are also useful to other software projects, not just Abseil.
+// Its data/*.txt files contain one test case per line, like:
+//
+// 3C00 3F800000 3FF0000000000000 1
+// 3D00 3FA00000 3FF4000000000000 1.25
+// 3D9A 3FB33333 3FF6666666666666 1.4
+// 57B7 42F6E979 405EDD2F1A9FBE77 123.456
+// 622A 44454000 4088A80000000000 789
+// 7C00 7F800000 7FF0000000000000 123.456e789
+//
+// For each line (and using 0-based column indexes), columns [5..13] and
+// [14..30] contain the 32-bit float and 64-bit double result of parsing
+// columns [31..].
+//
+// For example, parsing "1.4" as a float gives the bits 0x3FB33333.
+//
+// In this 6-line example, the final line's float and double values are all
+// infinity. The largest finite float and double values are approximately
+// 3.40e+38 and 1.80e+308.
+
+#include <cstdint>
+#include <cstdio>
+#include <string>
+
+#include "absl/base/casts.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+
+static constexpr uint8_t kUnhex[256] = {
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,  // '0' ..= '7'
+    0x8, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  // '8' ..= '9'
+
+    0x0, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0x0,  // 'A' ..= 'F'
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,  //
+};
+
+static absl::optional<std::string> ReadFileToString(const char* filename) {
+  FILE* f = fopen(filename, "rb");
+  if (!f) {
+    return absl::nullopt;
+  }
+  fseek(f, 0, SEEK_END);
+  size_t size = ftell(f);
+  fseek(f, 0, SEEK_SET);
+  std::string s(size, '\x00');
+  size_t n = fread(&s[0], 1, size, f);
+  fclose(f);
+  if (n != size) {
+    return absl::nullopt;
+  }
+  return s;
+}
+
+static bool ProcessOneTestFile(const char* filename) {
+  absl::optional<std::string> contents = ReadFileToString(filename);
+  if (!contents) {
+    absl::FPrintF(stderr, "Invalid file: %s\n", filename);
+    return false;
+  }
+
+  int num_cases = 0;
+  for (absl::string_view v(*contents); !v.empty();) {
+    size_t new_line = v.find('\n');
+    if ((new_line == absl::string_view::npos) || (new_line < 32)) {
+      break;
+    }
+    absl::string_view input = v.substr(31, new_line - 31);
+
+    // Test absl::SimpleAtof.
+    {
+      float f;
+      if (!absl::SimpleAtof(input, &f)) {
+        absl::FPrintF(stderr, "Could not parse \"%s\" in %s\n", input,
+                      filename);
+        return false;
+      }
+      uint32_t have32 = absl::bit_cast<uint32_t>(f);
+
+      uint32_t want32 = 0;
+      for (int i = 0; i < 8; i++) {
+        want32 = (want32 << 4) | kUnhex[static_cast<unsigned char>(v[5 + i])];
+      }
+
+      if (have32 != want32) {
+        absl::FPrintF(stderr,
+                      "absl::SimpleAtof failed parsing \"%s\" in %s\n  have  "
+                      "%08X\n  want  %08X\n",
+                      input, filename, have32, want32);
+        return false;
+      }
+    }
+
+    // Test absl::SimpleAtod.
+    {
+      double d;
+      if (!absl::SimpleAtod(input, &d)) {
+        absl::FPrintF(stderr, "Could not parse \"%s\" in %s\n", input,
+                      filename);
+        return false;
+      }
+      uint64_t have64 = absl::bit_cast<uint64_t>(d);
+
+      uint64_t want64 = 0;
+      for (int i = 0; i < 16; i++) {
+        want64 = (want64 << 4) | kUnhex[static_cast<unsigned char>(v[14 + i])];
+      }
+
+      if (have64 != want64) {
+        absl::FPrintF(stderr,
+                      "absl::SimpleAtod failed parsing \"%s\" in %s\n  have  "
+                      "%016X\n  want  %016X\n",
+                      input, filename, have64, want64);
+        return false;
+      }
+    }
+
+    num_cases++;
+    v = v.substr(new_line + 1);
+  }
+  printf("%8d OK in %s\n", num_cases, filename);
+  return true;
+}
+
+int main(int argc, char** argv) {
+  if (argc < 2) {
+    absl::FPrintF(
+        stderr,
+        "Usage: %s pnftd/data/*.txt\nwhere the pnftd directory is a local "
+        "checkout of "
+        "the\nhttps://github.com/nigeltao/parse-number-fxx-test-data "
+        "repository.\n",
+        argv[0]);
+    return 1;
+  }
+
+  for (int i = 1; i < argc; i++) {
+    if (!ProcessOneTestFile(argv[i])) {
+      return 1;
+    }
+  }
+  return 0;
+}
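
The test-data format documented at the top of this new file can be spot-checked without the pnftd repository: the two hex columns are just the bit patterns of the correctly rounded float and double parses of the decimal column. Below is a small hand check of the "1.4" sample line quoted in the comment above, using the standard strtof/strtod instead of absl::SimpleAtof/SimpleAtod so it compiles on its own.

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

int main() {
  // Sample line from the comment above: "3D9A 3FB33333 3FF6666666666666 1.4".
  const float f = std::strtof("1.4", nullptr);
  const double d = std::strtod("1.4", nullptr);
  uint32_t f_bits;
  uint64_t d_bits;
  std::memcpy(&f_bits, &f, sizeof(f));
  std::memcpy(&d_bits, &d, sizeof(d));
  assert(f_bits == 0x3FB33333u);            // float column (32-bit parse)
  assert(d_bits == 0x3FF6666666666666ull);  // double column (64-bit parse)
  return 0;
}
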
diff --git a/abseil-cpp/absl/strings/char_formatting_test.cc b/abseil-cpp/absl/strings/char_formatting_test.cc
new file mode 100644
index 0000000..1692da7
--- /dev/null
+++ b/abseil-cpp/absl/strings/char_formatting_test.cc
@@ -0,0 +1,169 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/substitute.h"
+
+namespace {
+
+TEST(CharFormatting, Char) {
+  const char v = 'A';
+
+  // Desired behavior: does not compile:
+  // EXPECT_EQ(absl::StrCat(v, "B"), "AB");
+  // EXPECT_EQ(absl::StrFormat("%vB", v), "AB");
+
+  // Legacy behavior: format as char:
+  EXPECT_EQ(absl::Substitute("$0B", v), "AB");
+}
+
+enum CharEnum : char {};
+TEST(CharFormatting, CharEnum) {
+  auto v = static_cast<CharEnum>('A');
+
+  // Desired behavior: format as decimal
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+
+  // Legacy behavior: format as character:
+
+  // Some older versions of gcc behave differently in this one case
+#if !defined(__GNUC__) || defined(__clang__)
+  EXPECT_EQ(absl::Substitute("$0B", v), "AB");
+#endif
+}
+
+enum class CharEnumClass: char {};
+TEST(CharFormatting, CharEnumClass) {
+  auto v = static_cast<CharEnumClass>('A');
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+
+  // Legacy behavior: format as character:
+  EXPECT_EQ(absl::Substitute("$0B", v), "AB");
+}
+
+TEST(CharFormatting, UnsignedChar) {
+  const unsigned char v = 'A';
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+  // Signedness check
+  const unsigned char w = 255;
+  EXPECT_EQ(absl::StrCat(w, "B"), "255B");
+  EXPECT_EQ(absl::Substitute("$0B", w), "255B");
+  // EXPECT_EQ(absl::StrFormat("%vB", w), "255B");
+}
+
+TEST(CharFormatting, SignedChar) {
+  const signed char v = 'A';
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+  // Signedness check
+  const signed char w = -128;
+  EXPECT_EQ(absl::StrCat(w, "B"), "-128B");
+  EXPECT_EQ(absl::Substitute("$0B", w), "-128B");
+}
+
+enum UnsignedCharEnum : unsigned char {};
+TEST(CharFormatting, UnsignedCharEnum) {
+  auto v = static_cast<UnsignedCharEnum>('A');
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+  // Signedness check
+  auto w = static_cast<UnsignedCharEnum>(255);
+  EXPECT_EQ(absl::StrCat(w, "B"), "255B");
+  EXPECT_EQ(absl::Substitute("$0B", w), "255B");
+  EXPECT_EQ(absl::StrFormat("%vB", w), "255B");
+}
+
+enum SignedCharEnum : signed char {};
+TEST(CharFormatting, SignedCharEnum) {
+  auto v = static_cast<SignedCharEnum>('A');
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+  // Signedness check
+  auto w = static_cast<SignedCharEnum>(-128);
+  EXPECT_EQ(absl::StrCat(w, "B"), "-128B");
+  EXPECT_EQ(absl::Substitute("$0B", w), "-128B");
+  EXPECT_EQ(absl::StrFormat("%vB", w), "-128B");
+}
+
+enum class UnsignedCharEnumClass : unsigned char {};
+TEST(CharFormatting, UnsignedCharEnumClass) {
+  auto v = static_cast<UnsignedCharEnumClass>('A');
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+  // Signedness check
+  auto w = static_cast<UnsignedCharEnumClass>(255);
+  EXPECT_EQ(absl::StrCat(w, "B"), "255B");
+  EXPECT_EQ(absl::Substitute("$0B", w), "255B");
+  EXPECT_EQ(absl::StrFormat("%vB", w), "255B");
+}
+
+enum SignedCharEnumClass : signed char {};
+TEST(CharFormatting, SignedCharEnumClass) {
+  auto v = static_cast<SignedCharEnumClass>('A');
+
+  // Desired behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+  // Signedness check
+  auto w = static_cast<SignedCharEnumClass>(-128);
+  EXPECT_EQ(absl::StrCat(w, "B"), "-128B");
+  EXPECT_EQ(absl::Substitute("$0B", w), "-128B");
+  EXPECT_EQ(absl::StrFormat("%vB", w), "-128B");
+}
+
+#ifdef __cpp_lib_byte
+TEST(CharFormatting, StdByte) {
+  auto v = static_cast<std::byte>('A');
+  // Desired behavior: format as 0xff
+  // (No APIs do this today.)
+
+  // Legacy behavior: format as decimal:
+  EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+  EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+  EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+}
+#endif  // __cpp_lib_byte
+
+}  // namespace
diff --git a/abseil-cpp/absl/strings/charconv.cc b/abseil-cpp/absl/strings/charconv.cc
index 3613a65..778a1c7 100644
--- a/abseil-cpp/absl/strings/charconv.cc
+++ b/abseil-cpp/absl/strings/charconv.cc
@@ -18,9 +18,11 @@
 #include <cassert>
 #include <cmath>
 #include <cstring>
+#include <limits>
 
 #include "absl/base/casts.h"
-#include "absl/base/internal/bits.h"
+#include "absl/base/config.h"
+#include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
 #include "absl/strings/internal/charconv_bigint.h"
 #include "absl/strings/internal/charconv_parse.h"
@@ -65,6 +67,14 @@
 
 template <>
 struct FloatTraits<double> {
+  using mantissa_t = uint64_t;
+
+  // The number of bits in the given float type.
+  static constexpr int kTargetBits = 64;
+
+  // The number of exponent bits in the given float type.
+  static constexpr int kTargetExponentBits = 11;
+
   // The number of mantissa bits in the given float type.  This includes the
   // implied high bit.
   static constexpr int kTargetMantissaBits = 53;
@@ -83,11 +93,43 @@
   // m * 2**kMinNormalExponent is exactly equal to DBL_MIN.
   static constexpr int kMinNormalExponent = -1074;
 
+  // The IEEE exponent bias.  It equals ((1 << (kTargetExponentBits - 1)) - 1).
+  static constexpr int kExponentBias = 1023;
+
+  // The Eisel-Lemire "Shifting to 54/25 Bits" adjustment.  It equals (63 - 1 -
+  // kTargetMantissaBits).
+  static constexpr int kEiselLemireShift = 9;
+
+  // The Eisel-Lemire high64_mask.  It equals ((1 << kEiselLemireShift) - 1).
+  static constexpr uint64_t kEiselLemireMask = uint64_t{0x1FF};
+
+  // The smallest negative integer N (smallest negative means furthest from
+  // zero) such that parsing 9999999999999999999eN, with 19 nines, is still
+  // positive. Parsing a smaller (more negative) N will produce zero.
+  //
+  // Adjusting the decimal point and exponent, without adjusting the value,
+  // 9999999999999999999eN equals 9.999999999999999999eM where M = N + 18.
+  //
+  // 9999999999999999999, with 19 nines but no decimal point, is the largest
+  // "repeated nines" integer that fits in a uint64_t.
+  static constexpr int kEiselLemireMinInclusiveExp10 = -324 - 18;
+
+  // The smallest positive integer N such that parsing 1eN produces infinity.
+  // Parsing a smaller N will produce something finite.
+  static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
+
   static double MakeNan(const char* tagp) {
+#if ABSL_HAVE_BUILTIN(__builtin_nan)
+    // Use __builtin_nan() if available since it has a fix for
+    // https://bugs.llvm.org/show_bug.cgi?id=37778
+    // std::nan may use the glibc implementation.
+    return __builtin_nan(tagp);
+#else
     // Support nan no matter which namespace it's in.  Some platforms
     // incorrectly don't put it in namespace std.
     using namespace std;  // NOLINT
     return nan(tagp);
+#endif
   }
 
   // Builds a nonzero floating point number out of the provided parts.
@@ -103,7 +145,7 @@
   // a normal value is made, or it must be less narrow than that, in which case
   // `exponent` must be exactly kMinNormalExponent, and a subnormal value is
   // made.
-  static double Make(uint64_t mantissa, int exponent, bool sign) {
+  static double Make(mantissa_t mantissa, int exponent, bool sign) {
 #ifndef ABSL_BIT_PACK_FLOATS
     // Support ldexp no matter which namespace it's in.  Some platforms
     // incorrectly don't put it in namespace std.
@@ -111,13 +153,15 @@
     return sign ? -ldexp(mantissa, exponent) : ldexp(mantissa, exponent);
 #else
     constexpr uint64_t kMantissaMask =
-        (uint64_t(1) << (kTargetMantissaBits - 1)) - 1;
+        (uint64_t{1} << (kTargetMantissaBits - 1)) - 1;
     uint64_t dbl = static_cast<uint64_t>(sign) << 63;
     if (mantissa > kMantissaMask) {
       // Normal value.
       // Adjust by 1023 for the exponent representation bias, and an additional
-      // 52 due to the implied decimal point in the IEEE mantissa represenation.
-      dbl += uint64_t{exponent + 1023u + kTargetMantissaBits - 1} << 52;
+      // 52 due to the implied decimal point in the IEEE mantissa
+      // representation.
+      dbl += static_cast<uint64_t>(exponent + 1023 + kTargetMantissaBits - 1)
+             << 52;
       mantissa &= kMantissaMask;
     } else {
       // subnormal value
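
The biased-exponent arithmetic in the bit-packing branch above (taken when ABSL_BIT_PACK_FLOATS is defined) can be checked by hand for a simple input: for 1.0 the normalized 53-bit significand is 2**52 with exponent -52, so the exponent field becomes -52 + 1023 + 53 - 1 = 1023 and the packed bits are 0x3FF0000000000000. A standalone sketch of that arithmetic; the worked constants are derived here by hand, not taken from the patch.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t mantissa = uint64_t{1} << 52;  // 1.0 == 2**52 * 2**-52
  const int exponent = -52;
  // Biased exponent field: -52 + 1023 + (53 - 1) == 1023 == 0x3FF.
  uint64_t bits = static_cast<uint64_t>(exponent + 1023 + 53 - 1) << 52;
  // Masking drops the implicit leading bit, leaving an all-zero fraction.
  bits |= mantissa & ((uint64_t{1} << 52) - 1);
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  assert(bits == 0x3FF0000000000000ull && d == 1.0);
  return 0;
}
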
@@ -134,16 +178,34 @@
 // members and methods.
 template <>
 struct FloatTraits<float> {
+  using mantissa_t = uint32_t;
+
+  static constexpr int kTargetBits = 32;
+  static constexpr int kTargetExponentBits = 8;
   static constexpr int kTargetMantissaBits = 24;
   static constexpr int kMaxExponent = 104;
   static constexpr int kMinNormalExponent = -149;
+  static constexpr int kExponentBias = 127;
+  static constexpr int kEiselLemireShift = 38;
+  static constexpr uint64_t kEiselLemireMask = uint64_t{0x3FFFFFFFFF};
+  static constexpr int kEiselLemireMinInclusiveExp10 = -46 - 18;
+  static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
+
   static float MakeNan(const char* tagp) {
+#if ABSL_HAVE_BUILTIN(__builtin_nanf)
+    // Use __builtin_nanf() if available since it has a fix for
+    // https://bugs.llvm.org/show_bug.cgi?id=37778
+    // std::nanf may use the glibc implementation.
+    return __builtin_nanf(tagp);
+#else
     // Support nanf no matter which namespace it's in.  Some platforms
     // incorrectly don't put it in namespace std.
     using namespace std;  // NOLINT
-    return nanf(tagp);
+    return std::nanf(tagp);
+#endif
   }
-  static float Make(uint32_t mantissa, int exponent, bool sign) {
+
+  static float Make(mantissa_t mantissa, int exponent, bool sign) {
 #ifndef ABSL_BIT_PACK_FLOATS
     // Support ldexpf no matter which namespace it's in.  Some platforms
     // incorrectly don't put it in namespace std.
@@ -151,13 +213,15 @@
     return sign ? -ldexpf(mantissa, exponent) : ldexpf(mantissa, exponent);
 #else
     constexpr uint32_t kMantissaMask =
-        (uint32_t(1) << (kTargetMantissaBits - 1)) - 1;
+        (uint32_t{1} << (kTargetMantissaBits - 1)) - 1;
     uint32_t flt = static_cast<uint32_t>(sign) << 31;
     if (mantissa > kMantissaMask) {
       // Normal value.
       // Adjust by 127 for the exponent representation bias, and an additional
-      // 23 due to the implied decimal point in the IEEE mantissa represenation.
-      flt += uint32_t{exponent + 127u + kTargetMantissaBits - 1} << 23;
+      // 23 due to the implied decimal point in the IEEE mantissa
+      // representation.
+      flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1)
+             << 23;
       mantissa &= kMantissaMask;
     } else {
       // subnormal value
@@ -181,39 +245,45 @@
 //
 //   2**63 <= Power10Mantissa(n) < 2**64.
 //
+// See the "Table of powers of 10" comment below for a "1e60" example.
+//
 // Lookups into the power-of-10 table must first check the Power10Overflow() and
 // Power10Underflow() functions, to avoid out-of-bounds table access.
 //
-// Indexes into these tables are biased by -kPower10TableMin, and the table has
-// values in the range [kPower10TableMin, kPower10TableMax].
-extern const uint64_t kPower10MantissaTable[];
-extern const int16_t kPower10ExponentTable[];
+// Indexes into these tables are biased by -kPower10TableMinInclusive. Valid
+// indexes range from kPower10TableMinInclusive to kPower10TableMaxExclusive.
+extern const uint64_t kPower10MantissaHighTable[];  // High 64 of 128 bits.
+extern const uint64_t kPower10MantissaLowTable[];   // Low  64 of 128 bits.
 
-// The smallest allowed value for use with the Power10Mantissa() and
-// Power10Exponent() functions below.  (If a smaller exponent is needed in
+// The smallest (inclusive) allowed value for use with the Power10Mantissa()
+// and Power10Exponent() functions below.  (If a smaller exponent is needed in
 // calculations, the end result is guaranteed to underflow.)
-constexpr int kPower10TableMin = -342;
+constexpr int kPower10TableMinInclusive = -342;
 
-// The largest allowed value for use with the Power10Mantissa() and
-// Power10Exponent() functions below.  (If a smaller exponent is needed in
-// calculations, the end result is guaranteed to overflow.)
-constexpr int kPower10TableMax = 308;
+// The largest (exclusive) allowed value for use with the Power10Mantissa() and
+// Power10Exponent() functions below.  (If a larger-or-equal exponent is needed
+// in calculations, the end result is guaranteed to overflow.)
+constexpr int kPower10TableMaxExclusive = 309;
 
 uint64_t Power10Mantissa(int n) {
-  return kPower10MantissaTable[n - kPower10TableMin];
+  return kPower10MantissaHighTable[n - kPower10TableMinInclusive];
 }
 
 int Power10Exponent(int n) {
-  return kPower10ExponentTable[n - kPower10TableMin];
+  // The 217706 etc magic numbers encode the results as a formula instead of a
+  // table. Their equivalence (over the kPower10TableMinInclusive ..
+  // kPower10TableMaxExclusive range) is confirmed by
+  // https://github.com/google/wuffs/blob/315b2e52625ebd7b02d8fac13e3cd85ea374fb80/script/print-mpb-powers-of-10.go
+  return (217706 * n >> 16) - 63;
 }
 
 // Returns true if n is large enough that 10**n always results in an IEEE
 // overflow.
-bool Power10Overflow(int n) { return n > kPower10TableMax; }
+bool Power10Overflow(int n) { return n >= kPower10TableMaxExclusive; }
 
 // Returns true if n is small enough that 10**n times a ParsedFloat mantissa
 // always results in an IEEE underflow.
-bool Power10Underflow(int n) { return n < kPower10TableMin; }
+bool Power10Underflow(int n) { return n < kPower10TableMinInclusive; }
 
 // Returns true if Power10Mantissa(n) * 2**Power10Exponent(n) is exactly equal
 // to 10**n numerically.  Put another way, this returns true if there is no
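
The closed-form Power10Exponent above works because 217706 / 65536 ≈ 3.3219299 sits just above log2(10), so (217706 * n) >> 16 reproduces floor(n * log2(10)) across the table's range (the wuffs script cited in the comment confirms the full range), and the trailing -63 accounts for the mantissas being normalized into [2**63, 2**64). A few hand-verified spot checks; the expected values below are worked out by hand, not copied from the removed kPower10ExponentTable.

#include <cassert>

// Same closed form as the patch, reproduced here so the check is standalone.
constexpr int Power10ExponentFormula(int n) { return (217706 * n >> 16) - 63; }

int main() {
  assert(Power10ExponentFormula(0) == -63);    // 1e0  == (2**63) * 2**-63
  assert(Power10ExponentFormula(1) == -60);    // 1e1  == 0xA000000000000000 * 2**-60
  assert(Power10ExponentFormula(60) == 136);   // the "1e60" example mentioned above
  assert(Power10ExponentFormula(308) == 960);  // just below kPower10TableMaxExclusive
  return 0;
}
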
@@ -244,9 +314,11 @@
 // minus the number of leading zero bits.)
 int BitWidth(uint128 value) {
   if (Uint128High64(value) == 0) {
-    return 64 - base_internal::CountLeadingZeros64(Uint128Low64(value));
+    // This static_cast is only needed when using a std::bit_width()
+    // implementation that does not have the fix for LWG 3656 applied.
+    return static_cast<int>(bit_width(Uint128Low64(value)));
   }
-  return 128 - base_internal::CountLeadingZeros64(Uint128High64(value));
+  return 128 - countl_zero(Uint128High64(value));
 }
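
For context on the LWG 3656 note: before that fix, std::bit_width(uint64_t) returned uint64_t rather than int. Below is a self-contained C++20 sketch of the same high-half/low-half computation; it is an illustration of the idea only (the patched code uses absl::bit_width and absl::countl_zero, not the std versions), and the function name is ours.

    #include <bit>
    #include <cstdint>

    // Bit width of a 128-bit value given as two 64-bit halves.
    int BitWidth128(uint64_t hi, uint64_t lo) {
      if (hi == 0) {
        // The cast mirrors the LWG 3656 workaround mentioned above.
        return static_cast<int>(std::bit_width(lo));  // 0..64
      }
      return 128 - std::countl_zero(hi);  // 65..128
    }
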
 
 // Calculates how far to the right a mantissa needs to be shifted to create a
@@ -285,14 +357,20 @@
 bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
                     FloatType* value) {
   if (input.type == strings_internal::FloatType::kNan) {
-    // A bug in both clang and gcc would cause the compiler to optimize away the
-    // buffer we are building below.  Declaring the buffer volatile avoids the
-    // issue, and has no measurable performance impact in microbenchmarks.
+    // A bug in both clang < 7 and gcc would cause the compiler to optimize
+    // away the buffer we are building below.  Declaring the buffer volatile
+    // avoids the issue, and has no measurable performance impact in
+    // microbenchmarks.
     //
     // https://bugs.llvm.org/show_bug.cgi?id=37778
     // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86113
     constexpr ptrdiff_t kNanBufferSize = 128;
+#if (defined(__GNUC__) && !defined(__clang__)) || \
+    (defined(__clang__) && __clang_major__ < 7)
     volatile char n_char_sequence[kNanBufferSize];
+#else
+    char n_char_sequence[kNanBufferSize];
+#endif
     if (input.subrange_begin == nullptr) {
       n_char_sequence[0] = '\0';
     } else {
@@ -337,8 +415,10 @@
     *value = negative ? -0.0 : 0.0;
     return;
   }
-  *value = FloatTraits<FloatType>::Make(calculated.mantissa,
-                                        calculated.exponent, negative);
+  *value = FloatTraits<FloatType>::Make(
+      static_cast<typename FloatTraits<FloatType>::mantissa_t>(
+          calculated.mantissa),
+      calculated.exponent, negative);
 }
 
 // Returns the given uint128 shifted to the right by `shift` bits, and rounds
@@ -399,7 +479,7 @@
     // the low bit of `value` is set.
     //
     // In inexact mode, the nonzero error means the actual value is greater
-    // than the halfway point and we must alway round up.
+    // than the halfway point and we must always round up.
     if ((value & 1) == 1 || !input_exact) {
       ++value;
     }
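
The two rounding rules above (round half to even when the parsed input was exact, always round up from an exact tie when it was truncated) can be illustrated with a small standalone helper. This is our sketch of the rule, not the function being patched, and it assumes 1 <= shift <= 63.

    #include <cstdint>

    uint64_t ShiftRightRounded(uint64_t value, int shift, bool input_exact) {
      const uint64_t dropped = value & ((uint64_t{1} << shift) - 1);
      const uint64_t half = uint64_t{1} << (shift - 1);
      value >>= shift;
      if (dropped > half) {
        ++value;  // clearly above the halfway point
      } else if (dropped == half && ((value & 1) == 1 || !input_exact)) {
        ++value;  // exact tie: round to even, or up if the input was truncated
      }
      return value;
    }
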
@@ -499,7 +579,7 @@
 template <typename FloatType>
 CalculatedFloat CalculatedFloatFromRawValues(uint64_t mantissa, int exponent) {
   CalculatedFloat result;
-  if (mantissa == uint64_t(1) << FloatTraits<FloatType>::kTargetMantissaBits) {
+  if (mantissa == uint64_t{1} << FloatTraits<FloatType>::kTargetMantissaBits) {
     mantissa >>= 1;
     exponent += 1;
   }
@@ -519,7 +599,9 @@
     const strings_internal::ParsedFloat& parsed_hex) {
   uint64_t mantissa = parsed_hex.mantissa;
   int exponent = parsed_hex.exponent;
-  int mantissa_width = 64 - base_internal::CountLeadingZeros64(mantissa);
+  // This static_cast is only needed when using a std::bit_width()
+  // implementation that does not have the fix for LWG 3656 applied.
+  int mantissa_width = static_cast<int>(bit_width(mantissa));
   const int shift = NormalizedShiftSize<FloatType>(mantissa_width, exponent);
   bool result_exact;
   exponent += shift;
@@ -595,6 +677,185 @@
                                                  binary_exponent);
 }
 
+// As discussed in https://nigeltao.github.io/blog/2020/eisel-lemire.html the
+// primary goal of the Eisel-Lemire algorithm is speed, for 99+% of the cases,
+// not 100% coverage. As long as Eisel-Lemire doesn’t claim false positives,
+// the combined approach (falling back to an alternative implementation when
+// this function returns false) is both fast and correct.
+template <typename FloatType>
+bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
+                 FloatType* value, std::errc* ec) {
+  uint64_t man = input.mantissa;
+  int exp10 = input.exponent;
+  if (exp10 < FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10) {
+    *value = negative ? -0.0 : 0.0;
+    *ec = std::errc::result_out_of_range;
+    return true;
+  } else if (exp10 >= FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10) {
+    // Return max (a finite value) consistent with from_chars and DR 3081. For
+    // SimpleAtod and SimpleAtof, post-processing will return infinity.
+    *value = negative ? -std::numeric_limits<FloatType>::max()
+                      : std::numeric_limits<FloatType>::max();
+    *ec = std::errc::result_out_of_range;
+    return true;
+  }
+
+  // Assert kPower10TableMinInclusive <= exp10 < kPower10TableMaxExclusive.
+  // Equivalently, !Power10Underflow(exp10) and !Power10Overflow(exp10).
+  static_assert(
+      FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10 >=
+          kPower10TableMinInclusive,
+      "(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds");
+  static_assert(
+      FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10 <=
+          kPower10TableMaxExclusive,
+      "(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds");
+
+  // The terse (+) comments in this function body refer to sections of the
+  // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post.
+  //
+  // That blog post discusses double precision (11 exponent bits with a -1023
+  // bias, 52 mantissa bits), but the same approach applies to single precision
+  // (8 exponent bits with a -127 bias, 23 mantissa bits). Either way, the
+  // computation here happens with 64-bit values (e.g. man) or 128-bit values
+  // (e.g. x) before finally converting to 64- or 32-bit floating point.
+  //
+  // See also "Number Parsing at a Gigabyte per Second, Software: Practice and
+  // Experience 51 (8), 2021" (https://arxiv.org/abs/2101.11408) for detail.
+
+  // (+) Normalization.
+  int clz = countl_zero(man);
+  man <<= static_cast<unsigned int>(clz);
+  // The 217706 etc magic numbers are from the Power10Exponent function.
+  uint64_t ret_exp2 =
+      static_cast<uint64_t>((217706 * exp10 >> 16) + 64 +
+                            FloatTraits<FloatType>::kExponentBias - clz);
+
+  // (+) Multiplication.
+  uint128 x = static_cast<uint128>(man) *
+              static_cast<uint128>(
+                  kPower10MantissaHighTable[exp10 - kPower10TableMinInclusive]);
+
+  // (+) Wider Approximation.
+  static constexpr uint64_t high64_mask =
+      FloatTraits<FloatType>::kEiselLemireMask;
+  if (((Uint128High64(x) & high64_mask) == high64_mask) &&
+      (man > (std::numeric_limits<uint64_t>::max() - Uint128Low64(x)))) {
+    uint128 y =
+        static_cast<uint128>(man) *
+        static_cast<uint128>(
+            kPower10MantissaLowTable[exp10 - kPower10TableMinInclusive]);
+    x += Uint128High64(y);
+    // For example, parsing "4503599627370497.5" will take the if-true
+    // branch here (for double precision), since:
+    //  - x   = 0x8000000000000BFF_FFFFFFFFFFFFFFFF
+    //  - y   = 0x8000000000000BFF_7FFFFFFFFFFFF400
+    //  - man = 0xA000000000000F00
+    // Likewise, when parsing "0.0625" for single precision:
+    //  - x   = 0x7FFFFFFFFFFFFFFF_FFFFFFFFFFFFFFFF
+    //  - y   = 0x813FFFFFFFFFFFFF_8A00000000000000
+    //  - man = 0x9C40000000000000
+    if (((Uint128High64(x) & high64_mask) == high64_mask) &&
+        ((Uint128Low64(x) + 1) == 0) &&
+        (man > (std::numeric_limits<uint64_t>::max() - Uint128Low64(y)))) {
+      return false;
+    }
+  }
+
+  // (+) Shifting to 54 Bits (or for single precision, to 25 bits).
+  uint64_t msb = Uint128High64(x) >> 63;
+  uint64_t ret_man =
+      Uint128High64(x) >> (msb + FloatTraits<FloatType>::kEiselLemireShift);
+  ret_exp2 -= 1 ^ msb;
+
+  // (+) Half-way Ambiguity.
+  //
+  // For example, parsing "1e+23" will take the if-true branch here (for double
+  // precision), since:
+  //  - x       = 0x54B40B1F852BDA00_0000000000000000
+  //  - ret_man = 0x002A5A058FC295ED
+  // Likewise, when parsing "20040229.0" for single precision:
+  //  - x       = 0x4C72894000000000_0000000000000000
+  //  - ret_man = 0x000000000131CA25
+  if ((Uint128Low64(x) == 0) && ((Uint128High64(x) & high64_mask) == 0) &&
+      ((ret_man & 3) == 1)) {
+    return false;
+  }
+
+  // (+) From 54 to 53 Bits (or for single precision, from 25 to 24 bits).
+  ret_man += ret_man & 1;  // Line From54a.
+  ret_man >>= 1;           // Line From54b.
+  // Incrementing ret_man (at line From54a) may have overflowed 54 bits (53
+  // bits after the right shift by 1 at line From54b), so adjust for that.
+  //
+  // For example, parsing "9223372036854775807" will take the if-true branch
+  // here (for double precision), since:
+  //  - ret_man = 0x0020000000000000 = (1 << 53)
+  // Likewise, when parsing "2147483647.0" for single precision:
+  //  - ret_man = 0x0000000001000000 = (1 << 24)
+  if ((ret_man >> FloatTraits<FloatType>::kTargetMantissaBits) > 0) {
+    ret_exp2 += 1;
+    // Conceptually, we need a "ret_man >>= 1" in this if-block to balance
+    // incrementing ret_exp2 in the line immediately above. However, we only
+    // get here when line From54a overflowed (after adding a 1), so ret_man
+    // here is (1 << 53). Its low 53 bits are therefore all zeroes. The only
+    // remaining use of ret_man is to mask it with ((1 << 52) - 1), so only its
+    // low 52 bits matter. A "ret_man >>= 1" would have no effect in practice.
+    //
+    // We omit the "ret_man >>= 1", even if it is cheap (and this if-branch is
+    // rarely taken) and technically 'more correct', so that mutation tests
+    // that would otherwise modify or omit that "ret_man >>= 1" don't complain
+    // that such code mutations have no observable effect.
+  }
+
+  // ret_exp2 is a uint64_t. Zero or underflow means that we're in subnormal
+  // space. max_exp2 (0x7FF for double precision, 0xFF for single precision) or
+  // above means that we're in Inf/NaN space.
+  //
+  // The if block is equivalent to (but has fewer branches than):
+  //   if ((ret_exp2 <= 0) || (ret_exp2 >= max_exp2)) { etc }
+  //
+  // For example, parsing "4.9406564584124654e-324" will take the if-true
+  // branch here, since ret_exp2 = -51.
+  static constexpr uint64_t max_exp2 =
+      (1 << FloatTraits<FloatType>::kTargetExponentBits) - 1;
+  if ((ret_exp2 - 1) >= (max_exp2 - 1)) {
+    return false;
+  }
+
+#ifndef ABSL_BIT_PACK_FLOATS
+  if (FloatTraits<FloatType>::kTargetBits == 64) {
+    *value = FloatTraits<FloatType>::Make(
+        (ret_man & 0x000FFFFFFFFFFFFFu) | 0x0010000000000000u,
+        static_cast<int>(ret_exp2) - 1023 - 52, negative);
+    return true;
+  } else if (FloatTraits<FloatType>::kTargetBits == 32) {
+    *value = FloatTraits<FloatType>::Make(
+        (static_cast<uint32_t>(ret_man) & 0x007FFFFFu) | 0x00800000u,
+        static_cast<int>(ret_exp2) - 127 - 23, negative);
+    return true;
+  }
+#else
+  if (FloatTraits<FloatType>::kTargetBits == 64) {
+    uint64_t ret_bits = (ret_exp2 << 52) | (ret_man & 0x000FFFFFFFFFFFFFu);
+    if (negative) {
+      ret_bits |= 0x8000000000000000u;
+    }
+    *value = absl::bit_cast<double>(ret_bits);
+    return true;
+  } else if (FloatTraits<FloatType>::kTargetBits == 32) {
+    uint32_t ret_bits = (static_cast<uint32_t>(ret_exp2) << 23) |
+                        (static_cast<uint32_t>(ret_man) & 0x007FFFFFu);
+    if (negative) {
+      ret_bits |= 0x80000000u;
+    }
+    *value = absl::bit_cast<float>(ret_bits);
+    return true;
+  }
+#endif  // ABSL_BIT_PACK_FLOATS
+  return false;
+}
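
The "(+) Multiplication" step is an ordinary 64x64-to-128-bit product. The sketch below (helper name and main() are ours) reproduces the "1e+23" numbers quoted in the half-way-ambiguity comment: the normalized mantissa is 1 << 63, the 64-bit power-of-10 mantissa is 10**23 / 2**13 = 0xA968163F0A57B400 (an exact division), and the product's high half is 0x54B40B1F852BDA00 with a zero low half.

    #include <cassert>
    #include <cstdint>
    #include <utility>

    #include "absl/numeric/int128.h"

    // Full 128-bit product of two 64-bit values, split into {high, low} halves.
    std::pair<uint64_t, uint64_t> Mul64To128(uint64_t a, uint64_t b) {
      const absl::uint128 x = static_cast<absl::uint128>(a) * b;
      return {absl::Uint128High64(x), absl::Uint128Low64(x)};
    }

    int main() {
      const auto [hi, lo] = Mul64To128(uint64_t{1} << 63, 0xA968163F0A57B400u);
      assert(hi == 0x54B40B1F852BDA00u && lo == 0);
      return 0;
    }
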
+
 template <typename FloatType>
 from_chars_result FromCharsImpl(const char* first, const char* last,
                                 FloatType& value, chars_format fmt_flags) {
@@ -668,6 +929,12 @@
     if (HandleEdgeCase(decimal_parse, negative, &value)) {
       return result;
     }
+    // A nullptr subrange_begin means that the decimal_parse.mantissa is exact
+    // (not truncated), a precondition of the Eisel-Lemire algorithm.
+    if ((decimal_parse.subrange_begin == nullptr) &&
+        EiselLemire<FloatType>(decimal_parse, negative, &value, &result.ec)) {
+      return result;
+    }
     CalculatedFloat calculated =
         CalculateFromParsedDecimal<FloatType>(decimal_parse);
     EncodeResult(calculated, negative, &result, &value);
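
A minimal caller-side sketch of the pipeline wired up here (edge cases first, then the Eisel-Lemire fast path, then the exact fallback); the main() wrapper is ours. The input reuses the half-way example from the comments above, which Eisel-Lemire declines and the fallback resolves by rounding the tie to even.

    #include <cstdio>
    #include <system_error>

    #include "absl/strings/charconv.h"

    int main() {
      const char kInput[] = "4503599627370497.5";
      double d = 0.0;
      const absl::from_chars_result res =
          absl::from_chars(kInput, kInput + sizeof(kInput) - 1, d);
      if (res.ec == std::errc()) {
        std::printf("%.17g\n", d);  // prints 4503599627370498 (tie rounds to even)
      }
      return 0;
    }
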
@@ -688,15 +955,46 @@
 
 namespace {
 
-// Table of powers of 10, from kPower10TableMin to kPower10TableMax.
+// Table of powers of 10, from kPower10TableMinInclusive to
+// kPower10TableMaxExclusive.
 //
-// kPower10MantissaTable[i - kPower10TableMin] stores the 64-bit mantissa (high
-// bit always on), and kPower10ExponentTable[i - kPower10TableMin] stores the
-// power-of-two exponent.  For a given number i, this gives the unique mantissa
-// and exponent such that mantissa * 2**exponent <= 10**i < (mantissa + 1) *
-// 2**exponent.
+// kPower10MantissaHighTable[i - kPower10TableMinInclusive] stores the 64-bit
+// mantissa. The high bit is always on.
+//
+// kPower10MantissaLowTable extends that 64-bit mantissa to 128 bits.
+//
+// Power10Exponent(i) calculates the power-of-two exponent.
+//
+// For a number i, this gives the unique mantissaHigh and exponent such that
+// (mantissaHigh * 2**exponent) <= 10**i < ((mantissaHigh + 1) * 2**exponent).
+//
+// For example, Python can confirm that the exact hexadecimal value of 1e60 is:
+//    >>> a = 1000000000000000000000000000000000000000000000000000000000000
+//    >>> hex(a)
+//    '0x9f4f2726179a224501d762422c946590d91000000000000000'
+// Adding underscores at every 8th hex digit shows 50 hex digits:
+//    '0x9f4f2726_179a2245_01d76242_2c946590_d9100000_00000000_00'.
+// In this case, the high bit of the first hex digit, 9, is coincidentally set,
+// so we do not have to do further shifting to deduce the 128-bit mantissa:
+//   - kPower10MantissaHighTable[60 - kP10TMI] = 0x9f4f2726179a2245U
+//   - kPower10MantissaLowTable[ 60 - kP10TMI] = 0x01d762422c946590U
+// where kP10TMI is kPower10TableMinInclusive. The low 18 of those 50 hex
+// digits are truncated.
+//
+// 50 hex digits (with the high bit set) is 200 bits and mantissaHigh holds 64
+// bits, so Power10Exponent(60) = 200 - 64 = 136. Again, Python can confirm:
+//    >>> b = 0x9f4f2726179a2245
+//    >>> ((b+0)<<136) <= a
+//    True
+//    >>> ((b+1)<<136) <= a
+//    False
+//
+// The tables were generated by
+// https://github.com/google/wuffs/blob/315b2e52625ebd7b02d8fac13e3cd85ea374fb80/script/print-mpb-powers-of-10.go
+// after re-formatting its output into two arrays of N uint64_t values (instead
+// of an N element array of uint64_t pairs).
 
-const uint64_t kPower10MantissaTable[] = {
+const uint64_t kPower10MantissaHighTable[] = {
     0xeef453d6923bd65aU, 0x9558b4661b6565f8U, 0xbaaee17fa23ebf76U,
     0xe95a99df8ace6f53U, 0x91d8a02bb6c10594U, 0xb64ec836a47146f9U,
     0xe3e27a444d8d98b7U, 0x8e6d8c6ab0787f72U, 0xb208ef855c969f4fU,
@@ -916,67 +1214,224 @@
     0xb6472e511c81471dU, 0xe3d8f9e563a198e5U, 0x8e679c2f5e44ff8fU,
 };
 
-const int16_t kPower10ExponentTable[] = {
-    -1200, -1196, -1193, -1190, -1186, -1183, -1180, -1176, -1173, -1170, -1166,
-    -1163, -1160, -1156, -1153, -1150, -1146, -1143, -1140, -1136, -1133, -1130,
-    -1127, -1123, -1120, -1117, -1113, -1110, -1107, -1103, -1100, -1097, -1093,
-    -1090, -1087, -1083, -1080, -1077, -1073, -1070, -1067, -1063, -1060, -1057,
-    -1053, -1050, -1047, -1043, -1040, -1037, -1034, -1030, -1027, -1024, -1020,
-    -1017, -1014, -1010, -1007, -1004, -1000, -997,  -994,  -990,  -987,  -984,
-    -980,  -977,  -974,  -970,  -967,  -964,  -960,  -957,  -954,  -950,  -947,
-    -944,  -940,  -937,  -934,  -931,  -927,  -924,  -921,  -917,  -914,  -911,
-    -907,  -904,  -901,  -897,  -894,  -891,  -887,  -884,  -881,  -877,  -874,
-    -871,  -867,  -864,  -861,  -857,  -854,  -851,  -847,  -844,  -841,  -838,
-    -834,  -831,  -828,  -824,  -821,  -818,  -814,  -811,  -808,  -804,  -801,
-    -798,  -794,  -791,  -788,  -784,  -781,  -778,  -774,  -771,  -768,  -764,
-    -761,  -758,  -754,  -751,  -748,  -744,  -741,  -738,  -735,  -731,  -728,
-    -725,  -721,  -718,  -715,  -711,  -708,  -705,  -701,  -698,  -695,  -691,
-    -688,  -685,  -681,  -678,  -675,  -671,  -668,  -665,  -661,  -658,  -655,
-    -651,  -648,  -645,  -642,  -638,  -635,  -632,  -628,  -625,  -622,  -618,
-    -615,  -612,  -608,  -605,  -602,  -598,  -595,  -592,  -588,  -585,  -582,
-    -578,  -575,  -572,  -568,  -565,  -562,  -558,  -555,  -552,  -549,  -545,
-    -542,  -539,  -535,  -532,  -529,  -525,  -522,  -519,  -515,  -512,  -509,
-    -505,  -502,  -499,  -495,  -492,  -489,  -485,  -482,  -479,  -475,  -472,
-    -469,  -465,  -462,  -459,  -455,  -452,  -449,  -446,  -442,  -439,  -436,
-    -432,  -429,  -426,  -422,  -419,  -416,  -412,  -409,  -406,  -402,  -399,
-    -396,  -392,  -389,  -386,  -382,  -379,  -376,  -372,  -369,  -366,  -362,
-    -359,  -356,  -353,  -349,  -346,  -343,  -339,  -336,  -333,  -329,  -326,
-    -323,  -319,  -316,  -313,  -309,  -306,  -303,  -299,  -296,  -293,  -289,
-    -286,  -283,  -279,  -276,  -273,  -269,  -266,  -263,  -259,  -256,  -253,
-    -250,  -246,  -243,  -240,  -236,  -233,  -230,  -226,  -223,  -220,  -216,
-    -213,  -210,  -206,  -203,  -200,  -196,  -193,  -190,  -186,  -183,  -180,
-    -176,  -173,  -170,  -166,  -163,  -160,  -157,  -153,  -150,  -147,  -143,
-    -140,  -137,  -133,  -130,  -127,  -123,  -120,  -117,  -113,  -110,  -107,
-    -103,  -100,  -97,   -93,   -90,   -87,   -83,   -80,   -77,   -73,   -70,
-    -67,   -63,   -60,   -57,   -54,   -50,   -47,   -44,   -40,   -37,   -34,
-    -30,   -27,   -24,   -20,   -17,   -14,   -10,   -7,    -4,    0,     3,
-    6,     10,    13,    16,    20,    23,    26,    30,    33,    36,    39,
-    43,    46,    49,    53,    56,    59,    63,    66,    69,    73,    76,
-    79,    83,    86,    89,    93,    96,    99,    103,   106,   109,   113,
-    116,   119,   123,   126,   129,   132,   136,   139,   142,   146,   149,
-    152,   156,   159,   162,   166,   169,   172,   176,   179,   182,   186,
-    189,   192,   196,   199,   202,   206,   209,   212,   216,   219,   222,
-    226,   229,   232,   235,   239,   242,   245,   249,   252,   255,   259,
-    262,   265,   269,   272,   275,   279,   282,   285,   289,   292,   295,
-    299,   302,   305,   309,   312,   315,   319,   322,   325,   328,   332,
-    335,   338,   342,   345,   348,   352,   355,   358,   362,   365,   368,
-    372,   375,   378,   382,   385,   388,   392,   395,   398,   402,   405,
-    408,   412,   415,   418,   422,   425,   428,   431,   435,   438,   441,
-    445,   448,   451,   455,   458,   461,   465,   468,   471,   475,   478,
-    481,   485,   488,   491,   495,   498,   501,   505,   508,   511,   515,
-    518,   521,   524,   528,   531,   534,   538,   541,   544,   548,   551,
-    554,   558,   561,   564,   568,   571,   574,   578,   581,   584,   588,
-    591,   594,   598,   601,   604,   608,   611,   614,   617,   621,   624,
-    627,   631,   634,   637,   641,   644,   647,   651,   654,   657,   661,
-    664,   667,   671,   674,   677,   681,   684,   687,   691,   694,   697,
-    701,   704,   707,   711,   714,   717,   720,   724,   727,   730,   734,
-    737,   740,   744,   747,   750,   754,   757,   760,   764,   767,   770,
-    774,   777,   780,   784,   787,   790,   794,   797,   800,   804,   807,
-    810,   813,   817,   820,   823,   827,   830,   833,   837,   840,   843,
-    847,   850,   853,   857,   860,   863,   867,   870,   873,   877,   880,
-    883,   887,   890,   893,   897,   900,   903,   907,   910,   913,   916,
-    920,   923,   926,   930,   933,   936,   940,   943,   946,   950,   953,
-    956,   960,
+const uint64_t kPower10MantissaLowTable[] = {
+    0x113faa2906a13b3fU, 0x4ac7ca59a424c507U, 0x5d79bcf00d2df649U,
+    0xf4d82c2c107973dcU, 0x79071b9b8a4be869U, 0x9748e2826cdee284U,
+    0xfd1b1b2308169b25U, 0xfe30f0f5e50e20f7U, 0xbdbd2d335e51a935U,
+    0xad2c788035e61382U, 0x4c3bcb5021afcc31U, 0xdf4abe242a1bbf3dU,
+    0xd71d6dad34a2af0dU, 0x8672648c40e5ad68U, 0x680efdaf511f18c2U,
+    0x0212bd1b2566def2U, 0x014bb630f7604b57U, 0x419ea3bd35385e2dU,
+    0x52064cac828675b9U, 0x7343efebd1940993U, 0x1014ebe6c5f90bf8U,
+    0xd41a26e077774ef6U, 0x8920b098955522b4U, 0x55b46e5f5d5535b0U,
+    0xeb2189f734aa831dU, 0xa5e9ec7501d523e4U, 0x47b233c92125366eU,
+    0x999ec0bb696e840aU, 0xc00670ea43ca250dU, 0x380406926a5e5728U,
+    0xc605083704f5ecf2U, 0xf7864a44c633682eU, 0x7ab3ee6afbe0211dU,
+    0x5960ea05bad82964U, 0x6fb92487298e33bdU, 0xa5d3b6d479f8e056U,
+    0x8f48a4899877186cU, 0x331acdabfe94de87U, 0x9ff0c08b7f1d0b14U,
+    0x07ecf0ae5ee44dd9U, 0xc9e82cd9f69d6150U, 0xbe311c083a225cd2U,
+    0x6dbd630a48aaf406U, 0x092cbbccdad5b108U, 0x25bbf56008c58ea5U,
+    0xaf2af2b80af6f24eU, 0x1af5af660db4aee1U, 0x50d98d9fc890ed4dU,
+    0xe50ff107bab528a0U, 0x1e53ed49a96272c8U, 0x25e8e89c13bb0f7aU,
+    0x77b191618c54e9acU, 0xd59df5b9ef6a2417U, 0x4b0573286b44ad1dU,
+    0x4ee367f9430aec32U, 0x229c41f793cda73fU, 0x6b43527578c1110fU,
+    0x830a13896b78aaa9U, 0x23cc986bc656d553U, 0x2cbfbe86b7ec8aa8U,
+    0x7bf7d71432f3d6a9U, 0xdaf5ccd93fb0cc53U, 0xd1b3400f8f9cff68U,
+    0x23100809b9c21fa1U, 0xabd40a0c2832a78aU, 0x16c90c8f323f516cU,
+    0xae3da7d97f6792e3U, 0x99cd11cfdf41779cU, 0x40405643d711d583U,
+    0x482835ea666b2572U, 0xda3243650005eecfU, 0x90bed43e40076a82U,
+    0x5a7744a6e804a291U, 0x711515d0a205cb36U, 0x0d5a5b44ca873e03U,
+    0xe858790afe9486c2U, 0x626e974dbe39a872U, 0xfb0a3d212dc8128fU,
+    0x7ce66634bc9d0b99U, 0x1c1fffc1ebc44e80U, 0xa327ffb266b56220U,
+    0x4bf1ff9f0062baa8U, 0x6f773fc3603db4a9U, 0xcb550fb4384d21d3U,
+    0x7e2a53a146606a48U, 0x2eda7444cbfc426dU, 0xfa911155fefb5308U,
+    0x793555ab7eba27caU, 0x4bc1558b2f3458deU, 0x9eb1aaedfb016f16U,
+    0x465e15a979c1cadcU, 0x0bfacd89ec191ec9U, 0xcef980ec671f667bU,
+    0x82b7e12780e7401aU, 0xd1b2ecb8b0908810U, 0x861fa7e6dcb4aa15U,
+    0x67a791e093e1d49aU, 0xe0c8bb2c5c6d24e0U, 0x58fae9f773886e18U,
+    0xaf39a475506a899eU, 0x6d8406c952429603U, 0xc8e5087ba6d33b83U,
+    0xfb1e4a9a90880a64U, 0x5cf2eea09a55067fU, 0xf42faa48c0ea481eU,
+    0xf13b94daf124da26U, 0x76c53d08d6b70858U, 0x54768c4b0c64ca6eU,
+    0xa9942f5dcf7dfd09U, 0xd3f93b35435d7c4cU, 0xc47bc5014a1a6dafU,
+    0x359ab6419ca1091bU, 0xc30163d203c94b62U, 0x79e0de63425dcf1dU,
+    0x985915fc12f542e4U, 0x3e6f5b7b17b2939dU, 0xa705992ceecf9c42U,
+    0x50c6ff782a838353U, 0xa4f8bf5635246428U, 0x871b7795e136be99U,
+    0x28e2557b59846e3fU, 0x331aeada2fe589cfU, 0x3ff0d2c85def7621U,
+    0x0fed077a756b53a9U, 0xd3e8495912c62894U, 0x64712dd7abbbd95cU,
+    0xbd8d794d96aacfb3U, 0xecf0d7a0fc5583a0U, 0xf41686c49db57244U,
+    0x311c2875c522ced5U, 0x7d633293366b828bU, 0xae5dff9c02033197U,
+    0xd9f57f830283fdfcU, 0xd072df63c324fd7bU, 0x4247cb9e59f71e6dU,
+    0x52d9be85f074e608U, 0x67902e276c921f8bU, 0x00ba1cd8a3db53b6U,
+    0x80e8a40eccd228a4U, 0x6122cd128006b2cdU, 0x796b805720085f81U,
+    0xcbe3303674053bb0U, 0xbedbfc4411068a9cU, 0xee92fb5515482d44U,
+    0x751bdd152d4d1c4aU, 0xd262d45a78a0635dU, 0x86fb897116c87c34U,
+    0xd45d35e6ae3d4da0U, 0x8974836059cca109U, 0x2bd1a438703fc94bU,
+    0x7b6306a34627ddcfU, 0x1a3bc84c17b1d542U, 0x20caba5f1d9e4a93U,
+    0x547eb47b7282ee9cU, 0xe99e619a4f23aa43U, 0x6405fa00e2ec94d4U,
+    0xde83bc408dd3dd04U, 0x9624ab50b148d445U, 0x3badd624dd9b0957U,
+    0xe54ca5d70a80e5d6U, 0x5e9fcf4ccd211f4cU, 0x7647c3200069671fU,
+    0x29ecd9f40041e073U, 0xf468107100525890U, 0x7182148d4066eeb4U,
+    0xc6f14cd848405530U, 0xb8ada00e5a506a7cU, 0xa6d90811f0e4851cU,
+    0x908f4a166d1da663U, 0x9a598e4e043287feU, 0x40eff1e1853f29fdU,
+    0xd12bee59e68ef47cU, 0x82bb74f8301958ceU, 0xe36a52363c1faf01U,
+    0xdc44e6c3cb279ac1U, 0x29ab103a5ef8c0b9U, 0x7415d448f6b6f0e7U,
+    0x111b495b3464ad21U, 0xcab10dd900beec34U, 0x3d5d514f40eea742U,
+    0x0cb4a5a3112a5112U, 0x47f0e785eaba72abU, 0x59ed216765690f56U,
+    0x306869c13ec3532cU, 0x1e414218c73a13fbU, 0xe5d1929ef90898faU,
+    0xdf45f746b74abf39U, 0x6b8bba8c328eb783U, 0x066ea92f3f326564U,
+    0xc80a537b0efefebdU, 0xbd06742ce95f5f36U, 0x2c48113823b73704U,
+    0xf75a15862ca504c5U, 0x9a984d73dbe722fbU, 0xc13e60d0d2e0ebbaU,
+    0x318df905079926a8U, 0xfdf17746497f7052U, 0xfeb6ea8bedefa633U,
+    0xfe64a52ee96b8fc0U, 0x3dfdce7aa3c673b0U, 0x06bea10ca65c084eU,
+    0x486e494fcff30a62U, 0x5a89dba3c3efccfaU, 0xf89629465a75e01cU,
+    0xf6bbb397f1135823U, 0x746aa07ded582e2cU, 0xa8c2a44eb4571cdcU,
+    0x92f34d62616ce413U, 0x77b020baf9c81d17U, 0x0ace1474dc1d122eU,
+    0x0d819992132456baU, 0x10e1fff697ed6c69U, 0xca8d3ffa1ef463c1U,
+    0xbd308ff8a6b17cb2U, 0xac7cb3f6d05ddbdeU, 0x6bcdf07a423aa96bU,
+    0x86c16c98d2c953c6U, 0xe871c7bf077ba8b7U, 0x11471cd764ad4972U,
+    0xd598e40d3dd89bcfU, 0x4aff1d108d4ec2c3U, 0xcedf722a585139baU,
+    0xc2974eb4ee658828U, 0x733d226229feea32U, 0x0806357d5a3f525fU,
+    0xca07c2dcb0cf26f7U, 0xfc89b393dd02f0b5U, 0xbbac2078d443ace2U,
+    0xd54b944b84aa4c0dU, 0x0a9e795e65d4df11U, 0x4d4617b5ff4a16d5U,
+    0x504bced1bf8e4e45U, 0xe45ec2862f71e1d6U, 0x5d767327bb4e5a4cU,
+    0x3a6a07f8d510f86fU, 0x890489f70a55368bU, 0x2b45ac74ccea842eU,
+    0x3b0b8bc90012929dU, 0x09ce6ebb40173744U, 0xcc420a6a101d0515U,
+    0x9fa946824a12232dU, 0x47939822dc96abf9U, 0x59787e2b93bc56f7U,
+    0x57eb4edb3c55b65aU, 0xede622920b6b23f1U, 0xe95fab368e45ecedU,
+    0x11dbcb0218ebb414U, 0xd652bdc29f26a119U, 0x4be76d3346f0495fU,
+    0x6f70a4400c562ddbU, 0xcb4ccd500f6bb952U, 0x7e2000a41346a7a7U,
+    0x8ed400668c0c28c8U, 0x728900802f0f32faU, 0x4f2b40a03ad2ffb9U,
+    0xe2f610c84987bfa8U, 0x0dd9ca7d2df4d7c9U, 0x91503d1c79720dbbU,
+    0x75a44c6397ce912aU, 0xc986afbe3ee11abaU, 0xfbe85badce996168U,
+    0xfae27299423fb9c3U, 0xdccd879fc967d41aU, 0x5400e987bbc1c920U,
+    0x290123e9aab23b68U, 0xf9a0b6720aaf6521U, 0xf808e40e8d5b3e69U,
+    0xb60b1d1230b20e04U, 0xb1c6f22b5e6f48c2U, 0x1e38aeb6360b1af3U,
+    0x25c6da63c38de1b0U, 0x579c487e5a38ad0eU, 0x2d835a9df0c6d851U,
+    0xf8e431456cf88e65U, 0x1b8e9ecb641b58ffU, 0xe272467e3d222f3fU,
+    0x5b0ed81dcc6abb0fU, 0x98e947129fc2b4e9U, 0x3f2398d747b36224U,
+    0x8eec7f0d19a03aadU, 0x1953cf68300424acU, 0x5fa8c3423c052dd7U,
+    0x3792f412cb06794dU, 0xe2bbd88bbee40bd0U, 0x5b6aceaeae9d0ec4U,
+    0xf245825a5a445275U, 0xeed6e2f0f0d56712U, 0x55464dd69685606bU,
+    0xaa97e14c3c26b886U, 0xd53dd99f4b3066a8U, 0xe546a8038efe4029U,
+    0xde98520472bdd033U, 0x963e66858f6d4440U, 0xdde7001379a44aa8U,
+    0x5560c018580d5d52U, 0xaab8f01e6e10b4a6U, 0xcab3961304ca70e8U,
+    0x3d607b97c5fd0d22U, 0x8cb89a7db77c506aU, 0x77f3608e92adb242U,
+    0x55f038b237591ed3U, 0x6b6c46dec52f6688U, 0x2323ac4b3b3da015U,
+    0xabec975e0a0d081aU, 0x96e7bd358c904a21U, 0x7e50d64177da2e54U,
+    0xdde50bd1d5d0b9e9U, 0x955e4ec64b44e864U, 0xbd5af13bef0b113eU,
+    0xecb1ad8aeacdd58eU, 0x67de18eda5814af2U, 0x80eacf948770ced7U,
+    0xa1258379a94d028dU, 0x096ee45813a04330U, 0x8bca9d6e188853fcU,
+    0x775ea264cf55347dU, 0x95364afe032a819dU, 0x3a83ddbd83f52204U,
+    0xc4926a9672793542U, 0x75b7053c0f178293U, 0x5324c68b12dd6338U,
+    0xd3f6fc16ebca5e03U, 0x88f4bb1ca6bcf584U, 0x2b31e9e3d06c32e5U,
+    0x3aff322e62439fcfU, 0x09befeb9fad487c2U, 0x4c2ebe687989a9b3U,
+    0x0f9d37014bf60a10U, 0x538484c19ef38c94U, 0x2865a5f206b06fb9U,
+    0xf93f87b7442e45d3U, 0xf78f69a51539d748U, 0xb573440e5a884d1bU,
+    0x31680a88f8953030U, 0xfdc20d2b36ba7c3dU, 0x3d32907604691b4cU,
+    0xa63f9a49c2c1b10fU, 0x0fcf80dc33721d53U, 0xd3c36113404ea4a8U,
+    0x645a1cac083126e9U, 0x3d70a3d70a3d70a3U, 0xccccccccccccccccU,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
+    0x0000000000000000U, 0x4000000000000000U, 0x5000000000000000U,
+    0xa400000000000000U, 0x4d00000000000000U, 0xf020000000000000U,
+    0x6c28000000000000U, 0xc732000000000000U, 0x3c7f400000000000U,
+    0x4b9f100000000000U, 0x1e86d40000000000U, 0x1314448000000000U,
+    0x17d955a000000000U, 0x5dcfab0800000000U, 0x5aa1cae500000000U,
+    0xf14a3d9e40000000U, 0x6d9ccd05d0000000U, 0xe4820023a2000000U,
+    0xdda2802c8a800000U, 0xd50b2037ad200000U, 0x4526f422cc340000U,
+    0x9670b12b7f410000U, 0x3c0cdd765f114000U, 0xa5880a69fb6ac800U,
+    0x8eea0d047a457a00U, 0x72a4904598d6d880U, 0x47a6da2b7f864750U,
+    0x999090b65f67d924U, 0xfff4b4e3f741cf6dU, 0xbff8f10e7a8921a4U,
+    0xaff72d52192b6a0dU, 0x9bf4f8a69f764490U, 0x02f236d04753d5b4U,
+    0x01d762422c946590U, 0x424d3ad2b7b97ef5U, 0xd2e0898765a7deb2U,
+    0x63cc55f49f88eb2fU, 0x3cbf6b71c76b25fbU, 0x8bef464e3945ef7aU,
+    0x97758bf0e3cbb5acU, 0x3d52eeed1cbea317U, 0x4ca7aaa863ee4bddU,
+    0x8fe8caa93e74ef6aU, 0xb3e2fd538e122b44U, 0x60dbbca87196b616U,
+    0xbc8955e946fe31cdU, 0x6babab6398bdbe41U, 0xc696963c7eed2dd1U,
+    0xfc1e1de5cf543ca2U, 0x3b25a55f43294bcbU, 0x49ef0eb713f39ebeU,
+    0x6e3569326c784337U, 0x49c2c37f07965404U, 0xdc33745ec97be906U,
+    0x69a028bb3ded71a3U, 0xc40832ea0d68ce0cU, 0xf50a3fa490c30190U,
+    0x792667c6da79e0faU, 0x577001b891185938U, 0xed4c0226b55e6f86U,
+    0x544f8158315b05b4U, 0x696361ae3db1c721U, 0x03bc3a19cd1e38e9U,
+    0x04ab48a04065c723U, 0x62eb0d64283f9c76U, 0x3ba5d0bd324f8394U,
+    0xca8f44ec7ee36479U, 0x7e998b13cf4e1ecbU, 0x9e3fedd8c321a67eU,
+    0xc5cfe94ef3ea101eU, 0xbba1f1d158724a12U, 0x2a8a6e45ae8edc97U,
+    0xf52d09d71a3293bdU, 0x593c2626705f9c56U, 0x6f8b2fb00c77836cU,
+    0x0b6dfb9c0f956447U, 0x4724bd4189bd5eacU, 0x58edec91ec2cb657U,
+    0x2f2967b66737e3edU, 0xbd79e0d20082ee74U, 0xecd8590680a3aa11U,
+    0xe80e6f4820cc9495U, 0x3109058d147fdcddU, 0xbd4b46f0599fd415U,
+    0x6c9e18ac7007c91aU, 0x03e2cf6bc604ddb0U, 0x84db8346b786151cU,
+    0xe612641865679a63U, 0x4fcb7e8f3f60c07eU, 0xe3be5e330f38f09dU,
+    0x5cadf5bfd3072cc5U, 0x73d9732fc7c8f7f6U, 0x2867e7fddcdd9afaU,
+    0xb281e1fd541501b8U, 0x1f225a7ca91a4226U, 0x3375788de9b06958U,
+    0x0052d6b1641c83aeU, 0xc0678c5dbd23a49aU, 0xf840b7ba963646e0U,
+    0xb650e5a93bc3d898U, 0xa3e51f138ab4cebeU, 0xc66f336c36b10137U,
+    0xb80b0047445d4184U, 0xa60dc059157491e5U, 0x87c89837ad68db2fU,
+    0x29babe4598c311fbU, 0xf4296dd6fef3d67aU, 0x1899e4a65f58660cU,
+    0x5ec05dcff72e7f8fU, 0x76707543f4fa1f73U, 0x6a06494a791c53a8U,
+    0x0487db9d17636892U, 0x45a9d2845d3c42b6U, 0x0b8a2392ba45a9b2U,
+    0x8e6cac7768d7141eU, 0x3207d795430cd926U, 0x7f44e6bd49e807b8U,
+    0x5f16206c9c6209a6U, 0x36dba887c37a8c0fU, 0xc2494954da2c9789U,
+    0xf2db9baa10b7bd6cU, 0x6f92829494e5acc7U, 0xcb772339ba1f17f9U,
+    0xff2a760414536efbU, 0xfef5138519684abaU, 0x7eb258665fc25d69U,
+    0xef2f773ffbd97a61U, 0xaafb550ffacfd8faU, 0x95ba2a53f983cf38U,
+    0xdd945a747bf26183U, 0x94f971119aeef9e4U, 0x7a37cd5601aab85dU,
+    0xac62e055c10ab33aU, 0x577b986b314d6009U, 0xed5a7e85fda0b80bU,
+    0x14588f13be847307U, 0x596eb2d8ae258fc8U, 0x6fca5f8ed9aef3bbU,
+    0x25de7bb9480d5854U, 0xaf561aa79a10ae6aU, 0x1b2ba1518094da04U,
+    0x90fb44d2f05d0842U, 0x353a1607ac744a53U, 0x42889b8997915ce8U,
+    0x69956135febada11U, 0x43fab9837e699095U, 0x94f967e45e03f4bbU,
+    0x1d1be0eebac278f5U, 0x6462d92a69731732U, 0x7d7b8f7503cfdcfeU,
+    0x5cda735244c3d43eU, 0x3a0888136afa64a7U, 0x088aaa1845b8fdd0U,
+    0x8aad549e57273d45U, 0x36ac54e2f678864bU, 0x84576a1bb416a7ddU,
+    0x656d44a2a11c51d5U, 0x9f644ae5a4b1b325U, 0x873d5d9f0dde1feeU,
+    0xa90cb506d155a7eaU, 0x09a7f12442d588f2U, 0x0c11ed6d538aeb2fU,
+    0x8f1668c8a86da5faU, 0xf96e017d694487bcU, 0x37c981dcc395a9acU,
+    0x85bbe253f47b1417U, 0x93956d7478ccec8eU, 0x387ac8d1970027b2U,
+    0x06997b05fcc0319eU, 0x441fece3bdf81f03U, 0xd527e81cad7626c3U,
+    0x8a71e223d8d3b074U, 0xf6872d5667844e49U, 0xb428f8ac016561dbU,
+    0xe13336d701beba52U, 0xecc0024661173473U, 0x27f002d7f95d0190U,
+    0x31ec038df7b441f4U, 0x7e67047175a15271U, 0x0f0062c6e984d386U,
+    0x52c07b78a3e60868U, 0xa7709a56ccdf8a82U, 0x88a66076400bb691U,
+    0x6acff893d00ea435U, 0x0583f6b8c4124d43U, 0xc3727a337a8b704aU,
+    0x744f18c0592e4c5cU, 0x1162def06f79df73U, 0x8addcb5645ac2ba8U,
+    0x6d953e2bd7173692U, 0xc8fa8db6ccdd0437U, 0x1d9c9892400a22a2U,
+    0x2503beb6d00cab4bU, 0x2e44ae64840fd61dU, 0x5ceaecfed289e5d2U,
+    0x7425a83e872c5f47U, 0xd12f124e28f77719U, 0x82bd6b70d99aaa6fU,
+    0x636cc64d1001550bU, 0x3c47f7e05401aa4eU, 0x65acfaec34810a71U,
+    0x7f1839a741a14d0dU, 0x1ede48111209a050U, 0x934aed0aab460432U,
+    0xf81da84d5617853fU, 0x36251260ab9d668eU, 0xc1d72b7c6b426019U,
+    0xb24cf65b8612f81fU, 0xdee033f26797b627U, 0x169840ef017da3b1U,
+    0x8e1f289560ee864eU, 0xf1a6f2bab92a27e2U, 0xae10af696774b1dbU,
+    0xacca6da1e0a8ef29U, 0x17fd090a58d32af3U, 0xddfc4b4cef07f5b0U,
+    0x4abdaf101564f98eU, 0x9d6d1ad41abe37f1U, 0x84c86189216dc5edU,
+    0x32fd3cf5b4e49bb4U, 0x3fbc8c33221dc2a1U, 0x0fabaf3feaa5334aU,
+    0x29cb4d87f2a7400eU, 0x743e20e9ef511012U, 0x914da9246b255416U,
+    0x1ad089b6c2f7548eU, 0xa184ac2473b529b1U, 0xc9e5d72d90a2741eU,
+    0x7e2fa67c7a658892U, 0xddbb901b98feeab7U, 0x552a74227f3ea565U,
+    0xd53a88958f87275fU, 0x8a892abaf368f137U, 0x2d2b7569b0432d85U,
+    0x9c3b29620e29fc73U, 0x8349f3ba91b47b8fU, 0x241c70a936219a73U,
+    0xed238cd383aa0110U, 0xf4363804324a40aaU, 0xb143c6053edcd0d5U,
+    0xdd94b7868e94050aU, 0xca7cf2b4191c8326U, 0xfd1c2f611f63a3f0U,
+    0xbc633b39673c8cecU, 0xd5be0503e085d813U, 0x4b2d8644d8a74e18U,
+    0xddf8e7d60ed1219eU, 0xcabb90e5c942b503U, 0x3d6a751f3b936243U,
+    0x0cc512670a783ad4U, 0x27fb2b80668b24c5U, 0xb1f9f660802dedf6U,
+    0x5e7873f8a0396973U, 0xdb0b487b6423e1e8U, 0x91ce1a9a3d2cda62U,
+    0x7641a140cc7810fbU, 0xa9e904c87fcb0a9dU, 0x546345fa9fbdcd44U,
+    0xa97c177947ad4095U, 0x49ed8eabcccc485dU, 0x5c68f256bfff5a74U,
+    0x73832eec6fff3111U, 0xc831fd53c5ff7eabU, 0xba3e7ca8b77f5e55U,
+    0x28ce1bd2e55f35ebU, 0x7980d163cf5b81b3U, 0xd7e105bcc332621fU,
+    0x8dd9472bf3fefaa7U, 0xb14f98f6f0feb951U, 0x6ed1bf9a569f33d3U,
+    0x0a862f80ec4700c8U, 0xcd27bb612758c0faU, 0x8038d51cb897789cU,
+    0xe0470a63e6bd56c3U, 0x1858ccfce06cac74U, 0x0f37801e0c43ebc8U,
+    0xd30560258f54e6baU, 0x47c6b82ef32a2069U, 0x4cdc331d57fa5441U,
+    0xe0133fe4adf8e952U, 0x58180fddd97723a6U, 0x570f09eaa7ea7648U,
 };
 
 }  // namespace
diff --git a/abseil-cpp/absl/strings/charconv.h b/abseil-cpp/absl/strings/charconv.h
index e04be32..111c712 100644
--- a/abseil-cpp/absl/strings/charconv.h
+++ b/abseil-cpp/absl/strings/charconv.h
@@ -22,7 +22,7 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-// Workalike compatibilty version of std::chars_format from C++17.
+// Workalike compatibility version of std::chars_format from C++17.
 //
-// This is an bitfield enumerator which can be passed to absl::from_chars to
+// This is a bitfield enumerator which can be passed to absl::from_chars to
 // configure the string-to-float conversion.
@@ -48,7 +48,7 @@
   std::errc ec;
 };
 
-// Workalike compatibilty version of std::from_chars from C++17.  Currently
+// Workalike compatibility version of std::from_chars from C++17.  Currently
 // this only supports the `double` and `float` types.
 //
 // This interface incorporates the proposed resolutions for library issues
@@ -64,8 +64,9 @@
 // the result in `value`.
 //
 // The matching pattern format is almost the same as that of strtod(), except
-// that C locale is not respected, and an initial '+' character in the input
-// range will never be matched.
+// that (1) C locale is not respected, (2) an initial '+' character in the
+// input range will never be matched, and (3) leading whitespaces are not
+// ignored.
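
In other words, inputs that strtod() would accept after skipping whitespace or a leading '+' are reported as errors here. A short illustration (the helper name and main() are ours):

    #include <cassert>
    #include <cstring>
    #include <system_error>

    #include "absl/strings/charconv.h"

    bool Rejected(const char* s) {
      double d;
      return absl::from_chars(s, s + std::strlen(s), d).ec ==
             std::errc::invalid_argument;
    }

    int main() {
      assert(Rejected("+1.5"));  // a leading '+' is never matched
      assert(Rejected(" 1.5"));  // leading whitespace is not skipped
      assert(!Rejected("1.5"));
      return 0;
    }
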
 //
 // If `fmt` is set, it must be one of the enumerator values of the chars_format.
 // (This is despite the fact that chars_format is a bitmask type.)  If set to
diff --git a/abseil-cpp/absl/strings/charconv_test.cc b/abseil-cpp/absl/strings/charconv_test.cc
index 9090e9c..b83de5a 100644
--- a/abseil-cpp/absl/strings/charconv_test.cc
+++ b/abseil-cpp/absl/strings/charconv_test.cc
@@ -653,7 +653,9 @@
                      negative_from_chars_float);
     EXPECT_TRUE(std::signbit(negative_from_chars_float));
     EXPECT_FALSE(Identical(negative_from_chars_float, from_chars_float));
-    from_chars_float = std::copysign(from_chars_float, -1.0);
+    // Use the (float, float) overload of std::copysign to prevent narrowing;
+    // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98251.
+    from_chars_float = std::copysign(from_chars_float, -1.0f);
     EXPECT_TRUE(Identical(negative_from_chars_float, from_chars_float));
   }
 }
diff --git a/abseil-cpp/absl/strings/cord.cc b/abseil-cpp/absl/strings/cord.cc
index 763dcc4..14976ae 100644
--- a/abseil-cpp/absl/strings/cord.cc
+++ b/abseil-cpp/absl/strings/cord.cc
@@ -20,6 +20,7 @@
 #include <cstdio>
 #include <cstdlib>
 #include <iomanip>
+#include <ios>
 #include <iostream>
 #include <limits>
 #include <ostream>
@@ -34,11 +35,19 @@
 #include "absl/base/port.h"
 #include "absl/container/fixed_array.h"
 #include "absl/container/inlined_vector.h"
+#include "absl/crc/internal/crc_cord_state.h"
+#include "absl/strings/cord_buffer.h"
 #include "absl/strings/escaping.h"
+#include "absl/strings/internal/cord_data_edge.h"
 #include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_scope.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
 #include "absl/strings/internal/resize_uninitialized.h"
 #include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
 #include "absl/strings/str_join.h"
 #include "absl/strings/string_view.h"
 
@@ -46,158 +55,21 @@
 ABSL_NAMESPACE_BEGIN
 
 using ::absl::cord_internal::CordRep;
-using ::absl::cord_internal::CordRepConcat;
+using ::absl::cord_internal::CordRepBtree;
+using ::absl::cord_internal::CordRepCrc;
 using ::absl::cord_internal::CordRepExternal;
+using ::absl::cord_internal::CordRepFlat;
 using ::absl::cord_internal::CordRepSubstring;
+using ::absl::cord_internal::CordzUpdateTracker;
+using ::absl::cord_internal::InlineData;
+using ::absl::cord_internal::kMaxFlatLength;
+using ::absl::cord_internal::kMinFlatLength;
 
-// Various representations that we allow
-enum CordRepKind {
-  CONCAT        = 0,
-  EXTERNAL      = 1,
-  SUBSTRING     = 2,
+using ::absl::cord_internal::kInlinedVectorSize;
+using ::absl::cord_internal::kMaxBytesToCopy;
 
-  // We have different tags for different sized flat arrays,
-  // starting with FLAT
-  FLAT          = 3,
-};
-
-namespace cord_internal {
-
-inline CordRepConcat* CordRep::concat() {
-  assert(tag == CONCAT);
-  return static_cast<CordRepConcat*>(this);
-}
-
-inline const CordRepConcat* CordRep::concat() const {
-  assert(tag == CONCAT);
-  return static_cast<const CordRepConcat*>(this);
-}
-
-inline CordRepSubstring* CordRep::substring() {
-  assert(tag == SUBSTRING);
-  return static_cast<CordRepSubstring*>(this);
-}
-
-inline const CordRepSubstring* CordRep::substring() const {
-  assert(tag == SUBSTRING);
-  return static_cast<const CordRepSubstring*>(this);
-}
-
-inline CordRepExternal* CordRep::external() {
-  assert(tag == EXTERNAL);
-  return static_cast<CordRepExternal*>(this);
-}
-
-inline const CordRepExternal* CordRep::external() const {
-  assert(tag == EXTERNAL);
-  return static_cast<const CordRepExternal*>(this);
-}
-
-}  // namespace cord_internal
-
-static const size_t kFlatOverhead = offsetof(CordRep, data);
-
-// Largest and smallest flat node lengths we are willing to allocate
-// Flat allocation size is stored in tag, which currently can encode sizes up
-// to 4K, encoded as multiple of either 8 or 32 bytes.
-// If we allow for larger sizes, we need to change this to 8/64, 16/128, etc.
-static constexpr size_t kMaxFlatSize = 4096;
-static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
-static constexpr size_t kMinFlatLength = 32 - kFlatOverhead;
-
-// Prefer copying blocks of at most this size, otherwise reference count.
-static const size_t kMaxBytesToCopy = 511;
-
-// Helper functions for rounded div, and rounding to exact sizes.
-static size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; }
-static size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; }
-
-// Returns the size to the nearest equal or larger value that can be
-// expressed exactly as a tag value.
-static size_t RoundUpForTag(size_t size) {
-  return RoundUp(size, (size <= 1024) ? 8 : 32);
-}
-
-// Converts the allocated size to a tag, rounding down if the size
-// does not exactly match a 'tag expressible' size value. The result is
-// undefined if the size exceeds the maximum size that can be encoded in
-// a tag, i.e., if size is larger than TagToAllocatedSize(<max tag>).
-static uint8_t AllocatedSizeToTag(size_t size) {
-  const size_t tag = (size <= 1024) ? size / 8 : 128 + size / 32 - 1024 / 32;
-  assert(tag <= std::numeric_limits<uint8_t>::max());
-  return tag;
-}
-
-// Converts the provided tag to the corresponding allocated size
-static constexpr size_t TagToAllocatedSize(uint8_t tag) {
-  return (tag <= 128) ? (tag * 8) : (1024 + (tag - 128) * 32);
-}
-
-// Converts the provided tag to the corresponding available data length
-static constexpr size_t TagToLength(uint8_t tag) {
-  return TagToAllocatedSize(tag) - kFlatOverhead;
-}
-
-// Enforce that kMaxFlatSize maps to a well-known exact tag value.
-static_assert(TagToAllocatedSize(224) == kMaxFlatSize, "Bad tag logic");
-
-constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
-  return n == 0 ? a : Fibonacci(n - 1, b, a + b);
-}
-
-static_assert(Fibonacci(63) == 6557470319842,
-              "Fibonacci values computed incorrectly");
-
-// Minimum length required for a given depth tree -- a tree is considered
-// balanced if
-//      length(t) >= min_length[depth(t)]
-// The root node depth is allowed to become twice as large to reduce rebalancing
-// for larger strings (see IsRootBalanced).
-static constexpr uint64_t min_length[] = {
-    Fibonacci(2),          Fibonacci(3),  Fibonacci(4),  Fibonacci(5),
-    Fibonacci(6),          Fibonacci(7),  Fibonacci(8),  Fibonacci(9),
-    Fibonacci(10),         Fibonacci(11), Fibonacci(12), Fibonacci(13),
-    Fibonacci(14),         Fibonacci(15), Fibonacci(16), Fibonacci(17),
-    Fibonacci(18),         Fibonacci(19), Fibonacci(20), Fibonacci(21),
-    Fibonacci(22),         Fibonacci(23), Fibonacci(24), Fibonacci(25),
-    Fibonacci(26),         Fibonacci(27), Fibonacci(28), Fibonacci(29),
-    Fibonacci(30),         Fibonacci(31), Fibonacci(32), Fibonacci(33),
-    Fibonacci(34),         Fibonacci(35), Fibonacci(36), Fibonacci(37),
-    Fibonacci(38),         Fibonacci(39), Fibonacci(40), Fibonacci(41),
-    Fibonacci(42),         Fibonacci(43), Fibonacci(44), Fibonacci(45),
-    Fibonacci(46),         Fibonacci(47),
-    0xffffffffffffffffull,  // Avoid overflow
-};
-
-static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length);
-
-// The inlined size to use with absl::InlinedVector.
-//
-// Note: The InlinedVectors in this file (and in cord.h) do not need to use
-// the same value for their inlined size. The fact that they do is historical.
-// It may be desirable for each to use a different inlined size optimized for
-// that InlinedVector's usage.
-//
-// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
-// the inlined vector size (47 exists for backward compatibility).
-static const int kInlinedVectorSize = 47;
-
-static inline bool IsRootBalanced(CordRep* node) {
-  if (node->tag != CONCAT) {
-    return true;
-  } else if (node->concat()->depth() <= 15) {
-    return true;
-  } else if (node->concat()->depth() > kMinLengthSize) {
-    return false;
-  } else {
-    // Allow depth to become twice as large as implied by fibonacci rule to
-    // reduce rebalancing for larger strings.
-    return (node->length >= min_length[node->concat()->depth() / 2]);
-  }
-}
-
-static CordRep* Rebalance(CordRep* node);
-static void DumpNode(CordRep* rep, bool include_data, std::ostream* os);
+static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
+                     int indent = 0);
 static bool VerifyNode(CordRep* root, CordRep* start_node,
                        bool full_validation);
 
@@ -217,197 +89,32 @@
   return node;
 }
 
-// --------------------------------------------------------------------
-// Memory management
-
-inline CordRep* Ref(CordRep* rep) {
-  if (rep != nullptr) {
-    rep->refcount.Increment();
-  }
-  return rep;
+static CordRepFlat* CreateFlat(const char* data, size_t length,
+                               size_t alloc_hint) {
+  CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
+  flat->length = length;
+  memcpy(flat->Data(), data, length);
+  return flat;
 }
 
-// This internal routine is called from the cold path of Unref below. Keeping it
-// in a separate routine allows good inlining of Unref into many profitable call
-// sites. However, the call to this function can be highly disruptive to the
-// register pressure in those callers. To minimize the cost to callers, we use
-// a special LLVM calling convention that preserves most registers. This allows
-// the call to this routine in cold paths to not disrupt the caller's register
-// pressure. This calling convention is not available on all platforms; we
-// intentionally allow LLVM to ignore the attribute rather than attempting to
-// hardcode the list of supported platforms.
-#if defined(__clang__) && !defined(__i386__)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wattributes"
-__attribute__((preserve_most))
-#pragma clang diagnostic pop
-#endif
-static void UnrefInternal(CordRep* rep) {
-  assert(rep != nullptr);
-
-  absl::InlinedVector<CordRep*, kInlinedVectorSize> pending;
-  while (true) {
-    if (rep->tag == CONCAT) {
-      CordRepConcat* rep_concat = rep->concat();
-      CordRep* right = rep_concat->right;
-      if (!right->refcount.Decrement()) {
-        pending.push_back(right);
-      }
-      CordRep* left = rep_concat->left;
-      delete rep_concat;
-      rep = nullptr;
-      if (!left->refcount.Decrement()) {
-        rep = left;
-        continue;
-      }
-    } else if (rep->tag == EXTERNAL) {
-      CordRepExternal* rep_external = rep->external();
-      rep_external->releaser_invoker(rep_external);
-      rep = nullptr;
-    } else if (rep->tag == SUBSTRING) {
-      CordRepSubstring* rep_substring = rep->substring();
-      CordRep* child = rep_substring->child;
-      delete rep_substring;
-      rep = nullptr;
-      if (!child->refcount.Decrement()) {
-        rep = child;
-        continue;
-      }
-    } else {
-      // Flat CordReps are allocated and constructed with raw ::operator new
-      // and placement new, and must be destructed and deallocated
-      // accordingly.
-#if defined(__cpp_sized_deallocation)
-      size_t size = TagToAllocatedSize(rep->tag);
-      rep->~CordRep();
-      ::operator delete(rep, size);
-#else
-      rep->~CordRep();
-      ::operator delete(rep);
-#endif
-      rep = nullptr;
-    }
-
-    if (!pending.empty()) {
-      rep = pending.back();
-      pending.pop_back();
-    } else {
-      break;
-    }
-  }
-}
-
-inline void Unref(CordRep* rep) {
-  // Fast-path for two common, hot cases: a null rep and a shared root.
-  if (ABSL_PREDICT_TRUE(rep == nullptr ||
-                        rep->refcount.DecrementExpectHighRefcount())) {
-    return;
-  }
-
-  UnrefInternal(rep);
-}
-
-// Return the depth of a node
-static int Depth(const CordRep* rep) {
-  if (rep->tag == CONCAT) {
-    return rep->concat()->depth();
-  } else {
-    return 0;
-  }
-}
-
-static void SetConcatChildren(CordRepConcat* concat, CordRep* left,
-                              CordRep* right) {
-  concat->left = left;
-  concat->right = right;
-
-  concat->length = left->length + right->length;
-  concat->set_depth(1 + std::max(Depth(left), Depth(right)));
-}
-
-// Create a concatenation of the specified nodes.
-// Does not change the refcounts of "left" and "right".
+// Creates a new flat or Btree out of the specified array.
 // The returned node has a refcount of 1.
-static CordRep* RawConcat(CordRep* left, CordRep* right) {
-  // Avoid making degenerate concat nodes (one child is empty)
-  if (left == nullptr || left->length == 0) {
-    Unref(left);
-    return right;
+static CordRep* NewBtree(const char* data, size_t length, size_t alloc_hint) {
+  if (length <= kMaxFlatLength) {
+    return CreateFlat(data, length, alloc_hint);
   }
-  if (right == nullptr || right->length == 0) {
-    Unref(right);
-    return left;
-  }
-
-  CordRepConcat* rep = new CordRepConcat();
-  rep->tag = CONCAT;
-  SetConcatChildren(rep, left, right);
-
-  return rep;
-}
-
-static CordRep* Concat(CordRep* left, CordRep* right) {
-  CordRep* rep = RawConcat(left, right);
-  if (rep != nullptr && !IsRootBalanced(rep)) {
-    rep = Rebalance(rep);
-  }
-  return VerifyTree(rep);
-}
-
-// Make a balanced tree out of an array of leaf nodes.
-static CordRep* MakeBalancedTree(CordRep** reps, size_t n) {
-  // Make repeated passes over the array, merging adjacent pairs
-  // until we are left with just a single node.
-  while (n > 1) {
-    size_t dst = 0;
-    for (size_t src = 0; src < n; src += 2) {
-      if (src + 1 < n) {
-        reps[dst] = Concat(reps[src], reps[src + 1]);
-      } else {
-        reps[dst] = reps[src];
-      }
-      dst++;
-    }
-    n = dst;
-  }
-
-  return reps[0];
-}
-
-// Create a new flat node.
-static CordRep* NewFlat(size_t length_hint) {
-  if (length_hint <= kMinFlatLength) {
-    length_hint = kMinFlatLength;
-  } else if (length_hint > kMaxFlatLength) {
-    length_hint = kMaxFlatLength;
-  }
-
-  // Round size up so it matches a size we can exactly express in a tag.
-  const size_t size = RoundUpForTag(length_hint + kFlatOverhead);
-  void* const raw_rep = ::operator new(size);
-  CordRep* rep = new (raw_rep) CordRep();
-  rep->tag = AllocatedSizeToTag(size);
-  return VerifyTree(rep);
+  CordRepFlat* flat = CreateFlat(data, kMaxFlatLength, 0);
+  data += kMaxFlatLength;
+  length -= kMaxFlatLength;
+  auto* root = CordRepBtree::Create(flat);
+  return CordRepBtree::Append(root, {data, length}, alloc_hint);
 }
 
 // Create a new tree out of the specified array.
 // The returned node has a refcount of 1.
-static CordRep* NewTree(const char* data,
-                        size_t length,
-                        size_t alloc_hint) {
+static CordRep* NewTree(const char* data, size_t length, size_t alloc_hint) {
   if (length == 0) return nullptr;
-  absl::FixedArray<CordRep*> reps((length - 1) / kMaxFlatLength + 1);
-  size_t n = 0;
-  do {
-    const size_t len = std::min(length, kMaxFlatLength);
-    CordRep* rep = NewFlat(len + alloc_hint);
-    rep->length = len;
-    memcpy(rep->data, data, len);
-    reps[n++] = VerifyTree(rep);
-    data += len;
-    length -= len;
-  } while (length != 0);
-  return MakeBalancedTree(reps.data(), n);
+  return NewBtree(data, length, alloc_hint);
 }
 
 namespace cord_internal {
@@ -422,87 +129,131 @@
 
 }  // namespace cord_internal
 
-static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) {
-  // Never create empty substring nodes
-  if (length == 0) {
-    Unref(child);
-    return nullptr;
-  } else {
-    CordRepSubstring* rep = new CordRepSubstring();
-    assert((offset + length) <= child->length);
-    rep->length = length;
-    rep->tag = SUBSTRING;
-    rep->start = offset;
-    rep->child = child;
-    return VerifyTree(rep);
+// Creates a CordRep from the provided string. If the string is large enough,
+// and not wasteful, we move the string into an external cord rep, preserving
+// the already allocated string contents.
+// Requires the provided string length to be larger than `kMaxInline`.
+static CordRep* CordRepFromString(std::string&& src) {
+  assert(src.length() > cord_internal::kMaxInline);
+  if (
+      // String is short: copy data to avoid external block overhead.
+      src.size() <= kMaxBytesToCopy ||
+      // String is wasteful: copy data to avoid pinning too much unused memory.
+      src.size() < src.capacity() / 2
+  ) {
+    return NewTree(src.data(), src.size(), 0);
   }
+
+  struct StringReleaser {
+    void operator()(absl::string_view /* data */) {}
+    std::string data;
+  };
+  const absl::string_view original_data = src;
+  auto* rep =
+      static_cast<::absl::cord_internal::CordRepExternalImpl<StringReleaser>*>(
+          absl::cord_internal::NewExternalRep(original_data,
+                                              StringReleaser{std::move(src)}));
+  // Moving src may have invalidated its data pointer, so adjust it.
+  rep->base = rep->template get<0>().data.data();
+  return rep;
 }
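
A caller-side view of the policy above (our sketch, not part of the patch): short strings, and strings whose buffer is mostly slack, are copied into cord nodes, while large well-utilized strings are adopted wholesale as an external rep whose StringReleaser keeps the std::string alive.

    #include <string>
    #include <utility>

    #include "absl/strings/cord.h"

    int main() {
      std::string big(1 << 20, 'x');         // large, fully used buffer
      absl::Cord adopted(std::move(big));    // likely takes the external-rep path

      absl::Cord copied(std::string("hi"));  // short: copied (stored inline)
      return 0;
    }
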
 
 // --------------------------------------------------------------------
 // Cord::InlineRep functions
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr unsigned char Cord::InlineRep::kMaxInline;
+#endif
 
-inline void Cord::InlineRep::set_data(const char* data, size_t n,
-                                      bool nullify_tail) {
+inline void Cord::InlineRep::set_data(const char* data, size_t n) {
   static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
-
-  cord_internal::SmallMemmove(data_, data, n, nullify_tail);
-  data_[kMaxInline] = static_cast<char>(n);
+  data_.set_inline_data(data, n);
 }
 
 inline char* Cord::InlineRep::set_data(size_t n) {
   assert(n <= kMaxInline);
-  memset(data_, 0, sizeof(data_));
-  data_[kMaxInline] = static_cast<char>(n);
-  return data_;
-}
-
-inline CordRep* Cord::InlineRep::force_tree(size_t extra_hint) {
-  size_t len = data_[kMaxInline];
-  CordRep* result;
-  if (len > kMaxInline) {
-    memcpy(&result, data_, sizeof(result));
-  } else {
-    result = NewFlat(len + extra_hint);
-    result->length = len;
-    memcpy(result->data, data_, len);
-    set_tree(result);
-  }
-  return result;
+  ResetToEmpty();
+  set_inline_size(n);
+  return data_.as_chars();
 }
 
 inline void Cord::InlineRep::reduce_size(size_t n) {
-  size_t tag = data_[kMaxInline];
+  size_t tag = inline_size();
   assert(tag <= kMaxInline);
   assert(tag >= n);
   tag -= n;
-  memset(data_ + tag, 0, n);
-  data_[kMaxInline] = static_cast<char>(tag);
+  memset(data_.as_chars() + tag, 0, n);
+  set_inline_size(tag);
 }
 
 inline void Cord::InlineRep::remove_prefix(size_t n) {
-  cord_internal::SmallMemmove(data_, data_ + n, data_[kMaxInline] - n);
+  cord_internal::SmallMemmove(data_.as_chars(), data_.as_chars() + n,
+                              inline_size() - n);
   reduce_size(n);
 }
 
-void Cord::InlineRep::AppendTree(CordRep* tree) {
-  if (tree == nullptr) return;
-  size_t len = data_[kMaxInline];
-  if (len == 0) {
-    set_tree(tree);
+// Returns `rep` converted into a CordRepBtree.
+// Directly returns `rep` if `rep` is already a CordRepBtree.
+static CordRepBtree* ForceBtree(CordRep* rep) {
+  return rep->IsBtree()
+             ? rep->btree()
+             : CordRepBtree::Create(cord_internal::RemoveCrcNode(rep));
+}
+
+void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
+                                          MethodIdentifier method) {
+  assert(!is_tree());
+  if (!data_.is_empty()) {
+    CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
+    tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
+  }
+  EmplaceTree(tree, method);
+}
+
+void Cord::InlineRep::AppendTreeToTree(CordRep* tree, MethodIdentifier method) {
+  assert(is_tree());
+  const CordzUpdateScope scope(data_.cordz_info(), method);
+  tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
+  SetTree(tree, scope);
+}
+
+void Cord::InlineRep::AppendTree(CordRep* tree, MethodIdentifier method) {
+  assert(tree != nullptr);
+  assert(tree->length != 0);
+  assert(!tree->IsCrc());
+  if (data_.is_tree()) {
+    AppendTreeToTree(tree, method);
   } else {
-    set_tree(Concat(force_tree(0), tree));
+    AppendTreeToInlined(tree, method);
   }
 }
 
-void Cord::InlineRep::PrependTree(CordRep* tree) {
+void Cord::InlineRep::PrependTreeToInlined(CordRep* tree,
+                                           MethodIdentifier method) {
+  assert(!is_tree());
+  if (!data_.is_empty()) {
+    CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
+    tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
+  }
+  EmplaceTree(tree, method);
+}
+
+void Cord::InlineRep::PrependTreeToTree(CordRep* tree,
+                                        MethodIdentifier method) {
+  assert(is_tree());
+  const CordzUpdateScope scope(data_.cordz_info(), method);
+  tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
+  SetTree(tree, scope);
+}
+
+void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) {
   assert(tree != nullptr);
-  size_t len = data_[kMaxInline];
-  if (len == 0) {
-    set_tree(tree);
+  assert(tree->length != 0);
+  assert(!tree->IsCrc());
+  if (data_.is_tree()) {
+    PrependTreeToTree(tree, method);
   } else {
-    set_tree(Concat(tree, force_tree(0)));
+    PrependTreeToInlined(tree, method);
   }
 }
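A minimal sketch of how the Append*/Prepend* tree paths above are reached through the public absl::Cord API (the wrapper name and sizes are ours; the 1000-byte source is chosen to exceed kMaxBytesToCopy so Append() takes the tree path instead of copying; Prepend mirrors the same dispatch):

    #include <string>
    #include "absl/strings/cord.h"

    void AppendPathsSketch() {
      absl::Cord dst("abc");                   // short: stays in inline storage
      absl::Cord big(std::string(1000, 'x'));  // large: tree-backed
      dst.Append(big);  // dst still inline    -> AppendTreeToInlined()
      dst.Append(big);  // dst now tree-backed -> AppendTreeToTree() via ForceBtree()
    }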
 
@@ -512,170 +263,88 @@
 // written to region and the actual size increase will be written to size.
 static inline bool PrepareAppendRegion(CordRep* root, char** region,
                                        size_t* size, size_t max_length) {
-  // Search down the right-hand path for a non-full FLAT node.
-  CordRep* dst = root;
-  while (dst->tag == CONCAT && dst->refcount.IsOne()) {
-    dst = dst->concat()->right;
+  if (root->IsBtree() && root->refcount.IsOne()) {
+    Span<char> span = root->btree()->GetAppendBuffer(max_length);
+    if (!span.empty()) {
+      *region = span.data();
+      *size = span.size();
+      return true;
+    }
   }
 
-  if (dst->tag < FLAT || !dst->refcount.IsOne()) {
+  CordRep* dst = root;
+  if (!dst->IsFlat() || !dst->refcount.IsOne()) {
     *region = nullptr;
     *size = 0;
     return false;
   }
 
   const size_t in_use = dst->length;
-  const size_t capacity = TagToLength(dst->tag);
+  const size_t capacity = dst->flat()->Capacity();
   if (in_use == capacity) {
     *region = nullptr;
     *size = 0;
     return false;
   }
 
-  size_t size_increase = std::min(capacity - in_use, max_length);
-
-  // We need to update the length fields for all nodes, including the leaf node.
-  for (CordRep* rep = root; rep != dst; rep = rep->concat()->right) {
-    rep->length += size_increase;
-  }
+  const size_t size_increase = std::min(capacity - in_use, max_length);
   dst->length += size_increase;
 
-  *region = dst->data + in_use;
+  *region = dst->flat()->Data() + in_use;
   *size = size_increase;
   return true;
 }
 
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
-                                      size_t max_length) {
-  if (max_length == 0) {
-    *region = nullptr;
-    *size = 0;
-    return;
-  }
-
-  // Try to fit in the inline buffer if possible.
-  size_t inline_length = data_[kMaxInline];
-  if (inline_length < kMaxInline && max_length <= kMaxInline - inline_length) {
-    *region = data_ + inline_length;
-    *size = max_length;
-    data_[kMaxInline] = static_cast<char>(inline_length + max_length);
-    return;
-  }
-
-  CordRep* root = force_tree(max_length);
-
-  if (PrepareAppendRegion(root, region, size, max_length)) {
-    return;
-  }
-
-  // Allocate new node.
-  CordRep* new_node =
-      NewFlat(std::max(static_cast<size_t>(root->length), max_length));
-  new_node->length =
-      std::min(static_cast<size_t>(TagToLength(new_node->tag)), max_length);
-  *region = new_node->data;
-  *size = new_node->length;
-  replace_tree(Concat(root, new_node));
-}
-
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size) {
-  const size_t max_length = std::numeric_limits<size_t>::max();
-
-  // Try to fit in the inline buffer if possible.
-  size_t inline_length = data_[kMaxInline];
-  if (inline_length < kMaxInline) {
-    *region = data_ + inline_length;
-    *size = kMaxInline - inline_length;
-    data_[kMaxInline] = kMaxInline;
-    return;
-  }
-
-  CordRep* root = force_tree(max_length);
-
-  if (PrepareAppendRegion(root, region, size, max_length)) {
-    return;
-  }
-
-  // Allocate new node.
-  CordRep* new_node = NewFlat(root->length);
-  new_node->length = TagToLength(new_node->tag);
-  *region = new_node->data;
-  *size = new_node->length;
-  replace_tree(Concat(root, new_node));
-}
-
-// If the rep is a leaf, this will increment the value at total_mem_usage and
-// will return true.
-static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) {
-  if (rep->tag >= FLAT) {
-    *total_mem_usage += TagToAllocatedSize(rep->tag);
-    return true;
-  }
-  if (rep->tag == EXTERNAL) {
-    *total_mem_usage += sizeof(CordRepConcat) + rep->length;
-    return true;
-  }
-  return false;
-}
-
 void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
-  ClearSlow();
-
-  memcpy(data_, src.data_, sizeof(data_));
-  if (is_tree()) {
-    Ref(tree());
+  assert(&src != this);
+  assert(is_tree() || src.is_tree());
+  auto constexpr method = CordzUpdateTracker::kAssignCord;
+  if (ABSL_PREDICT_TRUE(!is_tree())) {
+    EmplaceTree(CordRep::Ref(src.as_tree()), src.data_, method);
+    return;
   }
+
+  CordRep* tree = as_tree();
+  if (CordRep* src_tree = src.tree()) {
+    // Leave any existing `cordz_info` in place, and let MaybeTrackCord()
+    // decide whether this cord should be (or should remain) sampled.
+    data_.set_tree(CordRep::Ref(src_tree));
+    CordzInfo::MaybeTrackCord(data_, src.data_, method);
+  } else {
+    CordzInfo::MaybeUntrackCord(data_.cordz_info());
+    data_ = src.data_;
+  }
+  CordRep::Unref(tree);
 }
 
-void Cord::InlineRep::ClearSlow() {
+void Cord::InlineRep::UnrefTree() {
   if (is_tree()) {
-    Unref(tree());
+    CordzInfo::MaybeUntrackCord(data_.cordz_info());
+    CordRep::Unref(tree());
   }
-  memset(data_, 0, sizeof(data_));
 }
 
 // --------------------------------------------------------------------
 // Constructors and destructors
 
-Cord::Cord(const Cord& src) : contents_(src.contents_) {
-  Ref(contents_.tree());  // Does nothing if contents_ has embedded data
-}
-
-Cord::Cord(absl::string_view src) {
+Cord::Cord(absl::string_view src, MethodIdentifier method)
+    : contents_(InlineData::kDefaultInit) {
   const size_t n = src.size();
   if (n <= InlineRep::kMaxInline) {
-    contents_.set_data(src.data(), n, false);
+    contents_.set_data(src.data(), n);
   } else {
-    contents_.set_tree(NewTree(src.data(), n, 0));
+    CordRep* rep = NewTree(src.data(), n, 0);
+    contents_.EmplaceTree(rep, method);
   }
 }
 
 template <typename T, Cord::EnableIfString<T>>
-Cord::Cord(T&& src) {
-  if (
-      // String is short: copy data to avoid external block overhead.
-      src.size() <= kMaxBytesToCopy ||
-      // String is wasteful: copy data to avoid pinning too much unused memory.
-      src.size() < src.capacity() / 2
-  ) {
-    if (src.size() <= InlineRep::kMaxInline) {
-      contents_.set_data(src.data(), src.size(), false);
-    } else {
-      contents_.set_tree(NewTree(src.data(), src.size(), 0));
-    }
+Cord::Cord(T&& src) : contents_(InlineData::kDefaultInit) {
+  if (src.size() <= InlineRep::kMaxInline) {
+    contents_.set_data(src.data(), src.size());
   } else {
-    struct StringReleaser {
-      void operator()(absl::string_view /* data */) {}
-      std::string data;
-    };
-    const absl::string_view original_data = src;
-    auto* rep = static_cast<
-        ::absl::cord_internal::CordRepExternalImpl<StringReleaser>*>(
-        absl::cord_internal::NewExternalRep(
-            original_data, StringReleaser{std::forward<T>(src)}));
-    // Moving src may have invalidated its data pointer, so adjust it.
-    rep->base = rep->template get<0>().data.data();
-    contents_.set_tree(rep);
+    CordRep* rep = CordRepFromString(std::forward<T>(src));
+    contents_.EmplaceTree(rep, CordzUpdateTracker::kConstructorString);
   }
 }
 
@@ -684,113 +353,120 @@
 // The destruction code is separate so that the compiler can determine
 // that it does not need to call the destructor on a moved-from Cord.
 void Cord::DestroyCordSlow() {
-  Unref(VerifyTree(contents_.tree()));
+  assert(contents_.is_tree());
+  CordzInfo::MaybeUntrackCord(contents_.cordz_info());
+  CordRep::Unref(VerifyTree(contents_.as_tree()));
 }
 
 // --------------------------------------------------------------------
 // Mutators
 
 void Cord::Clear() {
-  Unref(contents_.clear());
+  if (CordRep* tree = contents_.clear()) {
+    CordRep::Unref(tree);
+  }
+}
+
+Cord& Cord::AssignLargeString(std::string&& src) {
+  auto constexpr method = CordzUpdateTracker::kAssignString;
+  assert(src.size() > kMaxBytesToCopy);
+  CordRep* rep = CordRepFromString(std::move(src));
+  if (CordRep* tree = contents_.tree()) {
+    CordzUpdateScope scope(contents_.cordz_info(), method);
+    contents_.SetTree(rep, scope);
+    CordRep::Unref(tree);
+  } else {
+    contents_.EmplaceTree(rep, method);
+  }
+  return *this;
 }
 
 Cord& Cord::operator=(absl::string_view src) {
-
+  auto constexpr method = CordzUpdateTracker::kAssignString;
   const char* data = src.data();
   size_t length = src.size();
   CordRep* tree = contents_.tree();
   if (length <= InlineRep::kMaxInline) {
-    // Embed into this->contents_
-    contents_.set_data(data, length, true);
-    Unref(tree);
+    // Embed into this->contents_, which is somewhat subtle:
+    // - MaybeUntrackCord must be called before Unref(tree).
+    // - MaybeUntrackCord must be called before set_data() clobbers cordz_info.
+    // - set_data() must be called before Unref(tree) as it may reference tree.
+    if (tree != nullptr) CordzInfo::MaybeUntrackCord(contents_.cordz_info());
+    contents_.set_data(data, length);
+    if (tree != nullptr) CordRep::Unref(tree);
     return *this;
   }
-  if (tree != nullptr && tree->tag >= FLAT &&
-      TagToLength(tree->tag) >= length && tree->refcount.IsOne()) {
-    // Copy in place if the existing FLAT node is reusable.
-    memmove(tree->data, data, length);
-    tree->length = length;
-    VerifyTree(tree);
-    return *this;
-  }
-  contents_.set_tree(NewTree(data, length, 0));
-  Unref(tree);
-  return *this;
-}
-
-template <typename T, Cord::EnableIfString<T>>
-Cord& Cord::operator=(T&& src) {
-  if (src.size() <= kMaxBytesToCopy) {
-    *this = absl::string_view(src);
+  if (tree != nullptr) {
+    CordzUpdateScope scope(contents_.cordz_info(), method);
+    if (tree->IsFlat() && tree->flat()->Capacity() >= length &&
+        tree->refcount.IsOne()) {
+      // Copy in place if the existing FLAT node is reusable.
+      memmove(tree->flat()->Data(), data, length);
+      tree->length = length;
+      VerifyTree(tree);
+      return *this;
+    }
+    contents_.SetTree(NewTree(data, length, 0), scope);
+    CordRep::Unref(tree);
   } else {
-    *this = Cord(std::forward<T>(src));
+    contents_.EmplaceTree(NewTree(data, length, 0), method);
   }
   return *this;
 }
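A small usage sketch of the branches above: assigning a short view to a tree-backed cord takes the inline path described in the "somewhat subtle" comment (wrapper name and sizes are ours):

    #include <string>
    #include "absl/strings/cord.h"

    void AssignSketch() {
      absl::Cord c(std::string(1000, 'x'));  // tree-backed
      c = absl::string_view("short");        // <= kMaxInline: untrack cordz info, copy the
                                             // bytes into inline storage, then Unref the tree
    }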
 
-template Cord& Cord::operator=(std::string&& src);
-
 // TODO(sanjay): Move to Cord::InlineRep section of file.  For now,
 // we keep it here to make diffs easier.
-void Cord::InlineRep::AppendArray(const char* src_data, size_t src_size) {
-  if (src_size == 0) return;  // memcpy(_, nullptr, 0) is undefined.
-  // Try to fit in the inline buffer if possible.
-  size_t inline_length = data_[kMaxInline];
-  if (inline_length < kMaxInline && src_size <= kMaxInline - inline_length) {
-    // Append new data to embedded array
-    data_[kMaxInline] = static_cast<char>(inline_length + src_size);
-    memcpy(data_ + inline_length, src_data, src_size);
-    return;
-  }
-
-  CordRep* root = tree();
+void Cord::InlineRep::AppendArray(absl::string_view src,
+                                  MethodIdentifier method) {
+  MaybeRemoveEmptyCrcNode();
+  if (src.empty()) return;  // memcpy(_, nullptr, 0) is undefined.
 
   size_t appended = 0;
-  if (root) {
+  CordRep* rep = tree();
+  const CordRep* const root = rep;
+  CordzUpdateScope scope(root ? cordz_info() : nullptr, method);
+  if (root != nullptr) {
+    rep = cord_internal::RemoveCrcNode(rep);
     char* region;
-    if (PrepareAppendRegion(root, &region, &appended, src_size)) {
-      memcpy(region, src_data, appended);
+    if (PrepareAppendRegion(rep, &region, &appended, src.size())) {
+      memcpy(region, src.data(), appended);
     }
   } else {
-    // It is possible that src_data == data_, but when we transition from an
-    // InlineRep to a tree we need to assign data_ = root via set_tree. To
-    // avoid corrupting the source data before we copy it, delay calling
-    // set_tree until after we've copied data.
-    // We are going from an inline size to beyond inline size. Make the new size
-    // either double the inlined size, or the added size + 10%.
-    const size_t size1 = inline_length * 2 + src_size;
-    const size_t size2 = inline_length + src_size / 10;
-    root = NewFlat(std::max<size_t>(size1, size2));
-    appended = std::min(src_size, TagToLength(root->tag) - inline_length);
-    memcpy(root->data, data_, inline_length);
-    memcpy(root->data + inline_length, src_data, appended);
-    root->length = inline_length + appended;
-    set_tree(root);
+    // Try to fit in the inline buffer if possible.
+    size_t inline_length = inline_size();
+    if (src.size() <= kMaxInline - inline_length) {
+      // Append new data to embedded array
+      set_inline_size(inline_length + src.size());
+      memcpy(data_.as_chars() + inline_length, src.data(), src.size());
+      return;
+    }
+
+    // Allocate flat to be a perfect fit on first append exceeding inlined size.
+    // Subsequent growth will use amortized growth until we reach maximum flat
+    // size.
+    rep = CordRepFlat::New(inline_length + src.size());
+    appended = std::min(src.size(), rep->flat()->Capacity() - inline_length);
+    memcpy(rep->flat()->Data(), data_.as_chars(), inline_length);
+    memcpy(rep->flat()->Data() + inline_length, src.data(), appended);
+    rep->length = inline_length + appended;
   }
 
-  src_data += appended;
-  src_size -= appended;
-  if (src_size == 0) {
+  src.remove_prefix(appended);
+  if (src.empty()) {
+    CommitTree(root, rep, scope, method);
     return;
   }
 
-  // Use new block(s) for any remaining bytes that were not handled above.
-  // Alloc extra memory only if the right child of the root of the new tree is
-  // going to be a FLAT node, which will permit further inplace appends.
-  size_t length = src_size;
-  if (src_size < kMaxFlatLength) {
-    // The new length is either
-    // - old size + 10%
-    // - old_size + src_size
-    // This will cause a reasonable conservative step-up in size that is still
-    // large enough to avoid excessive amounts of small fragments being added.
-    length = std::max<size_t>(root->length / 10, src_size);
-  }
-  set_tree(Concat(root, NewTree(src_data, src_size, length - src_size)));
+  // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
+  rep = ForceBtree(rep);
+  const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
+  rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
+
+  CommitTree(root, rep, scope, method);
 }
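For concreteness, the growth step above works out as follows with illustrative numbers (not taken from the source): an existing tree of 4096 bytes and a 100-byte remainder that did not fit in place:

    // rep->length == 4096, src.size() == 100                    (illustrative)
    // min_growth = std::max<size_t>(4096 / 10, 100) = 409
    // extra capacity hint passed to CordRepBtree::Append() = 409 - 100 = 309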
 
 inline CordRep* Cord::TakeRep() const& {
-  return Ref(contents_.tree());
+  return CordRep::Ref(contents_.tree());
 }
 
 inline CordRep* Cord::TakeRep() && {
@@ -801,10 +477,22 @@
 
 template <typename C>
 inline void Cord::AppendImpl(C&& src) {
+  auto constexpr method = CordzUpdateTracker::kAppendCord;
+
+  contents_.MaybeRemoveEmptyCrcNode();
+  if (src.empty()) return;
+
   if (empty()) {
-    // In case of an empty destination avoid allocating a new node, do not copy
-    // data.
-    *this = std::forward<C>(src);
+    // Since destination is empty, we can avoid allocating a node,
+    if (src.contents_.is_tree()) {
+      // by taking the tree directly
+      CordRep* rep =
+          cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
+      contents_.EmplaceTree(rep, method);
+    } else {
+      // or copying over inline data
+      contents_.data_ = src.contents_.data_;
+    }
     return;
   }
 
@@ -814,12 +502,12 @@
     CordRep* src_tree = src.contents_.tree();
     if (src_tree == nullptr) {
       // src has embedded data.
-      contents_.AppendArray(src.contents_.data(), src_size);
+      contents_.AppendArray({src.contents_.data(), src_size}, method);
       return;
     }
-    if (src_tree->tag >= FLAT) {
+    if (src_tree->IsFlat()) {
       // src tree just has one flat node.
-      contents_.AppendArray(src_tree->data, src_size);
+      contents_.AppendArray({src_tree->flat()->Data(), src_size}, method);
       return;
     }
     if (&src == this) {
@@ -834,29 +522,86 @@
     return;
   }
 
-  contents_.AppendTree(std::forward<C>(src).TakeRep());
+  // Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
+  CordRep* rep = cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
+  contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
 }
 
-void Cord::Append(const Cord& src) { AppendImpl(src); }
+static CordRep::ExtractResult ExtractAppendBuffer(CordRep* rep,
+                                                  size_t min_capacity) {
+  switch (rep->tag) {
+    case cord_internal::BTREE:
+      return CordRepBtree::ExtractAppendBuffer(rep->btree(), min_capacity);
+    default:
+      if (rep->IsFlat() && rep->refcount.IsOne() &&
+          rep->flat()->Capacity() - rep->length >= min_capacity) {
+        return {nullptr, rep};
+      }
+      return {rep, nullptr};
+  }
+}
 
-void Cord::Append(Cord&& src) { AppendImpl(std::move(src)); }
+static CordBuffer CreateAppendBuffer(InlineData& data, size_t block_size,
+                                     size_t capacity) {
+  // Watch out for overflow: callers can ask for size_t::max().
+  const size_t size = data.inline_size();
+  const size_t max_capacity = std::numeric_limits<size_t>::max() - size;
+  capacity = (std::min)(max_capacity, capacity) + size;
+  CordBuffer buffer =
+      block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity)
+                 : CordBuffer::CreateWithDefaultLimit(capacity);
+  cord_internal::SmallMemmove(buffer.data(), data.as_chars(), size);
+  buffer.SetLength(size);
+  data = {};
+  return buffer;
+}
+
+CordBuffer Cord::GetAppendBufferSlowPath(size_t block_size, size_t capacity,
+                                         size_t min_capacity) {
+  auto constexpr method = CordzUpdateTracker::kGetAppendBuffer;
+  CordRep* tree = contents_.tree();
+  if (tree != nullptr) {
+    CordzUpdateScope scope(contents_.cordz_info(), method);
+    CordRep::ExtractResult result = ExtractAppendBuffer(tree, min_capacity);
+    if (result.extracted != nullptr) {
+      contents_.SetTreeOrEmpty(result.tree, scope);
+      return CordBuffer(result.extracted->flat());
+    }
+    return block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity)
+                      : CordBuffer::CreateWithDefaultLimit(capacity);
+  }
+  return CreateAppendBuffer(contents_.data_, block_size, capacity);
+}
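GetAppendBufferSlowPath() backs the public append-buffer API (Cord::GetAppendBuffer / GetCustomAppendBuffer and CordBuffer, per current Abseil headers); a hedged sketch of the caller-side pattern it supports, including the inline-data migration done by CreateAppendBuffer() above (wrapper name and sizes are ours):

    #include <cstring>
    #include "absl/strings/cord.h"
    #include "absl/strings/cord_buffer.h"

    void AppendBufferSketch() {
      absl::Cord cord("seed");                 // non-empty inline cord -> slow path
      absl::CordBuffer buf = cord.GetAppendBuffer(/*capacity=*/512);
      // The 4 inline bytes were moved into `buf`; the cord is empty until Append().
      absl::Span<char> span = buf.available_up_to(5);
      std::memcpy(span.data(), "hello", 5);
      buf.IncreaseLengthBy(5);
      cord.Append(std::move(buf));             // cord now holds "seedhello"
    }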
+
+void Cord::Append(const Cord& src) {
+  AppendImpl(src);
+}
+
+void Cord::Append(Cord&& src) {
+  AppendImpl(std::move(src));
+}
 
 template <typename T, Cord::EnableIfString<T>>
 void Cord::Append(T&& src) {
   if (src.size() <= kMaxBytesToCopy) {
     Append(absl::string_view(src));
   } else {
-    Append(Cord(std::forward<T>(src)));
+    CordRep* rep = CordRepFromString(std::forward<T>(src));
+    contents_.AppendTree(rep, CordzUpdateTracker::kAppendString);
   }
 }
 
 template void Cord::Append(std::string&& src);
 
 void Cord::Prepend(const Cord& src) {
+  contents_.MaybeRemoveEmptyCrcNode();
+  if (src.empty()) return;
+
   CordRep* src_tree = src.contents_.tree();
   if (src_tree != nullptr) {
-    Ref(src_tree);
-    contents_.PrependTree(src_tree);
+    CordRep::Ref(src_tree);
+    contents_.PrependTree(cord_internal::RemoveCrcNode(src_tree),
+                          CordzUpdateTracker::kPrependCord);
     return;
   }
 
@@ -865,19 +610,50 @@
   return Prepend(src_contents);
 }
 
-void Cord::Prepend(absl::string_view src) {
+void Cord::PrependArray(absl::string_view src, MethodIdentifier method) {
+  contents_.MaybeRemoveEmptyCrcNode();
   if (src.empty()) return;  // memcpy(_, nullptr, 0) is undefined.
-  size_t cur_size = contents_.size();
-  if (!contents_.is_tree() && cur_size + src.size() <= InlineRep::kMaxInline) {
-    // Use embedded storage.
-    char data[InlineRep::kMaxInline + 1] = {0};
-    data[InlineRep::kMaxInline] = cur_size + src.size();  // set size
-    memcpy(data, src.data(), src.size());
-    memcpy(data + src.size(), contents_.data(), cur_size);
-    memcpy(reinterpret_cast<void*>(&contents_), data,
-           InlineRep::kMaxInline + 1);
+
+  if (!contents_.is_tree()) {
+    size_t cur_size = contents_.inline_size();
+    if (cur_size + src.size() <= InlineRep::kMaxInline) {
+      // Use embedded storage.
+      InlineData data;
+      data.set_inline_size(cur_size + src.size());
+      memcpy(data.as_chars(), src.data(), src.size());
+      memcpy(data.as_chars() + src.size(), contents_.data(), cur_size);
+      contents_.data_ = data;
+      return;
+    }
+  }
+  CordRep* rep = NewTree(src.data(), src.size(), 0);
+  contents_.PrependTree(rep, method);
+}
+
+void Cord::AppendPrecise(absl::string_view src, MethodIdentifier method) {
+  assert(!src.empty());
+  assert(src.size() <= cord_internal::kMaxFlatLength);
+  if (contents_.remaining_inline_capacity() >= src.size()) {
+    const size_t inline_length = contents_.inline_size();
+    contents_.set_inline_size(inline_length + src.size());
+    memcpy(contents_.data_.as_chars() + inline_length, src.data(), src.size());
   } else {
-    contents_.PrependTree(NewTree(src.data(), src.size(), 0));
+    contents_.AppendTree(CordRepFlat::Create(src), method);
+  }
+}
+
+void Cord::PrependPrecise(absl::string_view src, MethodIdentifier method) {
+  assert(!src.empty());
+  assert(src.size() <= cord_internal::kMaxFlatLength);
+  if (contents_.remaining_inline_capacity() >= src.size()) {
+    const size_t cur_size = contents_.inline_size();
+    InlineData data;
+    data.set_inline_size(cur_size + src.size());
+    memcpy(data.as_chars(), src.data(), src.size());
+    memcpy(data.as_chars() + src.size(), contents_.data(), cur_size);
+    contents_.data_ = data;
+  } else {
+    contents_.PrependTree(CordRepFlat::Create(src), method);
   }
 }
 
@@ -886,108 +662,41 @@
   if (src.size() <= kMaxBytesToCopy) {
     Prepend(absl::string_view(src));
   } else {
-    Prepend(Cord(std::forward<T>(src)));
+    CordRep* rep = CordRepFromString(std::forward<T>(src));
+    contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
   }
 }
 
 template void Cord::Prepend(std::string&& src);
 
-static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
-  if (n >= node->length) return nullptr;
-  if (n == 0) return Ref(node);
-  absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack;
-
-  while (node->tag == CONCAT) {
-    assert(n <= node->length);
-    if (n < node->concat()->left->length) {
-      // Push right to stack, descend left.
-      rhs_stack.push_back(node->concat()->right);
-      node = node->concat()->left;
-    } else {
-      // Drop left, descend right.
-      n -= node->concat()->left->length;
-      node = node->concat()->right;
-    }
-  }
-  assert(n <= node->length);
-
-  if (n == 0) {
-    Ref(node);
-  } else {
-    size_t start = n;
-    size_t len = node->length - n;
-    if (node->tag == SUBSTRING) {
-      // Consider in-place update of node, similar to in RemoveSuffixFrom().
-      start += node->substring()->start;
-      node = node->substring()->child;
-    }
-    node = NewSubstring(Ref(node), start, len);
-  }
-  while (!rhs_stack.empty()) {
-    node = Concat(node, Ref(rhs_stack.back()));
-    rhs_stack.pop_back();
-  }
-  return node;
-}
-
-// RemoveSuffixFrom() is very similar to RemovePrefixFrom(), with the
-// exception that removing a suffix has an optimization where a node may be
-// edited in place iff that node and all its ancestors have a refcount of 1.
-static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
-  if (n >= node->length) return nullptr;
-  if (n == 0) return Ref(node);
-  absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack;
-  bool inplace_ok = node->refcount.IsOne();
-
-  while (node->tag == CONCAT) {
-    assert(n <= node->length);
-    if (n < node->concat()->right->length) {
-      // Push left to stack, descend right.
-      lhs_stack.push_back(node->concat()->left);
-      node = node->concat()->right;
-    } else {
-      // Drop right, descend left.
-      n -= node->concat()->right->length;
-      node = node->concat()->left;
-    }
-    inplace_ok = inplace_ok && node->refcount.IsOne();
-  }
-  assert(n <= node->length);
-
-  if (n == 0) {
-    Ref(node);
-  } else if (inplace_ok && node->tag != EXTERNAL) {
-    // Consider making a new buffer if the current node capacity is much
-    // larger than the new length.
-    Ref(node);
-    node->length -= n;
-  } else {
-    size_t start = 0;
-    size_t len = node->length - n;
-    if (node->tag == SUBSTRING) {
-      start = node->substring()->start;
-      node = node->substring()->child;
-    }
-    node = NewSubstring(Ref(node), start, len);
-  }
-  while (!lhs_stack.empty()) {
-    node = Concat(Ref(lhs_stack.back()), node);
-    lhs_stack.pop_back();
-  }
-  return node;
-}
-
 void Cord::RemovePrefix(size_t n) {
   ABSL_INTERNAL_CHECK(n <= size(),
                       absl::StrCat("Requested prefix size ", n,
                                    " exceeds Cord's size ", size()));
+  contents_.MaybeRemoveEmptyCrcNode();
   CordRep* tree = contents_.tree();
   if (tree == nullptr) {
     contents_.remove_prefix(n);
   } else {
-    CordRep* newrep = RemovePrefixFrom(tree, n);
-    Unref(tree);
-    contents_.replace_tree(VerifyTree(newrep));
+    auto constexpr method = CordzUpdateTracker::kRemovePrefix;
+    CordzUpdateScope scope(contents_.cordz_info(), method);
+    tree = cord_internal::RemoveCrcNode(tree);
+    if (n >= tree->length) {
+      CordRep::Unref(tree);
+      tree = nullptr;
+    } else if (tree->IsBtree()) {
+      CordRep* old = tree;
+      tree = tree->btree()->SubTree(n, tree->length - n);
+      CordRep::Unref(old);
+    } else if (tree->IsSubstring() && tree->refcount.IsOne()) {
+      tree->substring()->start += n;
+      tree->length -= n;
+    } else {
+      CordRep* rep = CordRepSubstring::Substring(tree, n, tree->length - n);
+      CordRep::Unref(tree);
+      tree = rep;
+    }
+    contents_.SetTreeOrEmpty(tree, scope);
   }
 }
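A short sketch of the two RemovePrefix() paths above (wrapper name and sizes are ours):

    #include <string>
    #include "absl/strings/cord.h"

    void RemovePrefixSketch() {
      absl::Cord small("hello world");         // 11 bytes: inline
      small.RemovePrefix(6);                   // inline path: SmallMemmove + reduce_size
      absl::Cord big(std::string(4096, 'x'));  // tree-backed
      big.RemovePrefix(1);                     // tree path: btree SubTree() or a substring node
    }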
 
@@ -995,65 +704,29 @@
   ABSL_INTERNAL_CHECK(n <= size(),
                       absl::StrCat("Requested suffix size ", n,
                                    " exceeds Cord's size ", size()));
+  contents_.MaybeRemoveEmptyCrcNode();
   CordRep* tree = contents_.tree();
   if (tree == nullptr) {
     contents_.reduce_size(n);
   } else {
-    CordRep* newrep = RemoveSuffixFrom(tree, n);
-    Unref(tree);
-    contents_.replace_tree(VerifyTree(newrep));
-  }
-}
-
-// Work item for NewSubRange().
-struct SubRange {
-  SubRange(CordRep* a_node, size_t a_pos, size_t a_n)
-      : node(a_node), pos(a_pos), n(a_n) {}
-  CordRep* node;  // nullptr means concat last 2 results.
-  size_t pos;
-  size_t n;
-};
-
-static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) {
-  absl::InlinedVector<CordRep*, kInlinedVectorSize> results;
-  absl::InlinedVector<SubRange, kInlinedVectorSize> todo;
-  todo.push_back(SubRange(node, pos, n));
-  do {
-    const SubRange& sr = todo.back();
-    node = sr.node;
-    pos = sr.pos;
-    n = sr.n;
-    todo.pop_back();
-
-    if (node == nullptr) {
-      assert(results.size() >= 2);
-      CordRep* right = results.back();
-      results.pop_back();
-      CordRep* left = results.back();
-      results.pop_back();
-      results.push_back(Concat(left, right));
-    } else if (pos == 0 && n == node->length) {
-      results.push_back(Ref(node));
-    } else if (node->tag != CONCAT) {
-      if (node->tag == SUBSTRING) {
-        pos += node->substring()->start;
-        node = node->substring()->child;
-      }
-      results.push_back(NewSubstring(Ref(node), pos, n));
-    } else if (pos + n <= node->concat()->left->length) {
-      todo.push_back(SubRange(node->concat()->left, pos, n));
-    } else if (pos >= node->concat()->left->length) {
-      pos -= node->concat()->left->length;
-      todo.push_back(SubRange(node->concat()->right, pos, n));
+    auto constexpr method = CordzUpdateTracker::kRemoveSuffix;
+    CordzUpdateScope scope(contents_.cordz_info(), method);
+    tree = cord_internal::RemoveCrcNode(tree);
+    if (n >= tree->length) {
+      CordRep::Unref(tree);
+      tree = nullptr;
+    } else if (tree->IsBtree()) {
+      tree = CordRepBtree::RemoveSuffix(tree->btree(), n);
+    } else if (!tree->IsExternal() && tree->refcount.IsOne()) {
+      assert(tree->IsFlat() || tree->IsSubstring());
+      tree->length -= n;
     } else {
-      size_t left_n = node->concat()->left->length - pos;
-      todo.push_back(SubRange(nullptr, 0, 0));  // Concat()
-      todo.push_back(SubRange(node->concat()->right, 0, n - left_n));
-      todo.push_back(SubRange(node->concat()->left, pos, left_n));
+      CordRep* rep = CordRepSubstring::Substring(tree, 0, tree->length - n);
+      CordRep::Unref(tree);
+      tree = rep;
     }
-  } while (!todo.empty());
-  assert(results.size() == 1);
-  return results[0];
+    contents_.SetTreeOrEmpty(tree, scope);
+  }
 }
 
 Cord Cord::Subcord(size_t pos, size_t new_size) const {
@@ -1061,17 +734,19 @@
   size_t length = size();
   if (pos > length) pos = length;
   if (new_size > length - pos) new_size = length - pos;
+  if (new_size == 0) return sub_cord;
+
   CordRep* tree = contents_.tree();
   if (tree == nullptr) {
-    // sub_cord is newly constructed, no need to re-zero-out the tail of
-    // contents_ memory.
-    sub_cord.contents_.set_data(contents_.data() + pos, new_size, false);
-  } else if (new_size == 0) {
-    // We want to return empty subcord, so nothing to do.
-  } else if (new_size <= InlineRep::kMaxInline) {
+    sub_cord.contents_.set_data(contents_.data() + pos, new_size);
+    return sub_cord;
+  }
+
+  if (new_size <= InlineRep::kMaxInline) {
+    sub_cord.contents_.set_inline_size(new_size);
+    char* dest = sub_cord.contents_.data_.as_chars();
     Cord::ChunkIterator it = chunk_begin();
     it.AdvanceBytes(pos);
-    char* dest = sub_cord.contents_.data_;
     size_t remaining_size = new_size;
     while (remaining_size > it->size()) {
       cord_internal::SmallMemmove(dest, it->data(), it->size());
@@ -1080,154 +755,21 @@
       ++it;
     }
     cord_internal::SmallMemmove(dest, it->data(), remaining_size);
-    sub_cord.contents_.data_[InlineRep::kMaxInline] = new_size;
+    return sub_cord;
+  }
+
+  tree = cord_internal::SkipCrcNode(tree);
+  if (tree->IsBtree()) {
+    tree = tree->btree()->SubTree(pos, new_size);
   } else {
-    sub_cord.contents_.set_tree(NewSubRange(tree, pos, new_size));
+    tree = CordRepSubstring::Substring(tree, pos, new_size);
   }
+  sub_cord.contents_.EmplaceTree(tree, contents_.data_,
+                                 CordzUpdateTracker::kSubCord);
   return sub_cord;
 }
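A short sketch of the Subcord() paths above (wrapper name and sizes are ours):

    #include <string>
    #include "absl/strings/cord.h"

    void SubcordSketch() {
      absl::Cord big(std::string(4096, 'x'));
      absl::Cord a = big.Subcord(0, 10);     // <= kMaxInline: bytes copied into inline storage
      absl::Cord b = big.Subcord(16, 2048);  // tree path: shares data via a substring/btree node
    }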
 
 // --------------------------------------------------------------------
-// Balancing
-
-class CordForest {
- public:
-  explicit CordForest(size_t length)
-      : root_length_(length), trees_(kMinLengthSize, nullptr) {}
-
-  void Build(CordRep* cord_root) {
-    std::vector<CordRep*> pending = {cord_root};
-
-    while (!pending.empty()) {
-      CordRep* node = pending.back();
-      pending.pop_back();
-      CheckNode(node);
-      if (ABSL_PREDICT_FALSE(node->tag != CONCAT)) {
-        AddNode(node);
-        continue;
-      }
-
-      CordRepConcat* concat_node = node->concat();
-      if (concat_node->depth() >= kMinLengthSize ||
-          concat_node->length < min_length[concat_node->depth()]) {
-        pending.push_back(concat_node->right);
-        pending.push_back(concat_node->left);
-
-        if (concat_node->refcount.IsOne()) {
-          concat_node->left = concat_freelist_;
-          concat_freelist_ = concat_node;
-        } else {
-          Ref(concat_node->right);
-          Ref(concat_node->left);
-          Unref(concat_node);
-        }
-      } else {
-        AddNode(node);
-      }
-    }
-  }
-
-  CordRep* ConcatNodes() {
-    CordRep* sum = nullptr;
-    for (auto* node : trees_) {
-      if (node == nullptr) continue;
-
-      sum = PrependNode(node, sum);
-      root_length_ -= node->length;
-      if (root_length_ == 0) break;
-    }
-    ABSL_INTERNAL_CHECK(sum != nullptr, "Failed to locate sum node");
-    return VerifyTree(sum);
-  }
-
- private:
-  CordRep* AppendNode(CordRep* node, CordRep* sum) {
-    return (sum == nullptr) ? node : MakeConcat(sum, node);
-  }
-
-  CordRep* PrependNode(CordRep* node, CordRep* sum) {
-    return (sum == nullptr) ? node : MakeConcat(node, sum);
-  }
-
-  void AddNode(CordRep* node) {
-    CordRep* sum = nullptr;
-
-    // Collect together everything with which we will merge with node
-    int i = 0;
-    for (; node->length > min_length[i + 1]; ++i) {
-      auto& tree_at_i = trees_[i];
-
-      if (tree_at_i == nullptr) continue;
-      sum = PrependNode(tree_at_i, sum);
-      tree_at_i = nullptr;
-    }
-
-    sum = AppendNode(node, sum);
-
-    // Insert sum into appropriate place in the forest
-    for (; sum->length >= min_length[i]; ++i) {
-      auto& tree_at_i = trees_[i];
-      if (tree_at_i == nullptr) continue;
-
-      sum = MakeConcat(tree_at_i, sum);
-      tree_at_i = nullptr;
-    }
-
-    // min_length[0] == 1, which means sum->length >= min_length[0]
-    assert(i > 0);
-    trees_[i - 1] = sum;
-  }
-
-  // Make concat node trying to resue existing CordRepConcat nodes we
-  // already collected in the concat_freelist_.
-  CordRep* MakeConcat(CordRep* left, CordRep* right) {
-    if (concat_freelist_ == nullptr) return RawConcat(left, right);
-
-    CordRepConcat* rep = concat_freelist_;
-    if (concat_freelist_->left == nullptr) {
-      concat_freelist_ = nullptr;
-    } else {
-      concat_freelist_ = concat_freelist_->left->concat();
-    }
-    SetConcatChildren(rep, left, right);
-
-    return rep;
-  }
-
-  static void CheckNode(CordRep* node) {
-    ABSL_INTERNAL_CHECK(node->length != 0u, "");
-    if (node->tag == CONCAT) {
-      ABSL_INTERNAL_CHECK(node->concat()->left != nullptr, "");
-      ABSL_INTERNAL_CHECK(node->concat()->right != nullptr, "");
-      ABSL_INTERNAL_CHECK(node->length == (node->concat()->left->length +
-                                           node->concat()->right->length),
-                          "");
-    }
-  }
-
-  size_t root_length_;
-
-  // use an inlined vector instead of a flat array to get bounds checking
-  absl::InlinedVector<CordRep*, kInlinedVectorSize> trees_;
-
-  // List of concat nodes we can re-use for Cord balancing.
-  CordRepConcat* concat_freelist_ = nullptr;
-};
-
-static CordRep* Rebalance(CordRep* node) {
-  VerifyTree(node);
-  assert(node->tag == CONCAT);
-
-  if (node->length == 0) {
-    return nullptr;
-  }
-
-  CordForest forest(node->length);
-  forest.Build(node);
-  return forest.ConcatNodes();
-}
-
-// --------------------------------------------------------------------
 // Comparators
 
 namespace {
@@ -1252,7 +794,7 @@
 }
 
 // This overload set computes comparison results from a memcmp result. This
-// interface is used inside GenericCompare below. Differet implementations
+// interface is used inside GenericCompare below. Different implementations
 // are specialized for int and bool. For int we clamp result to {-1, 0, 1}
 // set. For bool we are just interested in "value == 0".
 template <typename ResultType>
@@ -1266,26 +808,29 @@
 
 }  // namespace
 
-// Helper routine. Locates the first flat chunk of the Cord without
-// initializing the iterator.
+// Helper routine. Locates the first flat or external chunk of the Cord without
+// initializing the iterator, and returns a string_view referencing the data.
 inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
-  size_t n = data_[kMaxInline];
-  if (n <= kMaxInline) {
-    return absl::string_view(data_, n);
+  if (!is_tree()) {
+    return absl::string_view(data_.as_chars(), data_.inline_size());
   }
 
-  CordRep* node = tree();
-  if (node->tag >= FLAT) {
-    return absl::string_view(node->data, node->length);
+  CordRep* node = cord_internal::SkipCrcNode(tree());
+  if (node->IsFlat()) {
+    return absl::string_view(node->flat()->Data(), node->length);
   }
 
-  if (node->tag == EXTERNAL) {
+  if (node->IsExternal()) {
     return absl::string_view(node->external()->base, node->length);
   }
 
-  // Walk down the left branches until we hit a non-CONCAT node.
-  while (node->tag == CONCAT) {
-    node = node->concat()->left;
+  if (node->IsBtree()) {
+    CordRepBtree* tree = node->btree();
+    int height = tree->height();
+    while (--height >= 0) {
+      tree = tree->Edge(CordRepBtree::kFront)->btree();
+    }
+    return tree->Data(tree->begin());
   }
 
   // Get the child node if we encounter a SUBSTRING.
@@ -1293,20 +838,60 @@
   size_t length = node->length;
   assert(length != 0);
 
-  if (node->tag == SUBSTRING) {
+  if (node->IsSubstring()) {
     offset = node->substring()->start;
     node = node->substring()->child;
   }
 
-  if (node->tag >= FLAT) {
-    return absl::string_view(node->data + offset, length);
+  if (node->IsFlat()) {
+    return absl::string_view(node->flat()->Data() + offset, length);
   }
 
-  assert((node->tag == EXTERNAL) && "Expect FLAT or EXTERNAL node here");
+  assert(node->IsExternal() && "Expect FLAT or EXTERNAL node here");
 
   return absl::string_view(node->external()->base + offset, length);
 }
 
+void Cord::SetCrcCordState(crc_internal::CrcCordState state) {
+  auto constexpr method = CordzUpdateTracker::kSetExpectedChecksum;
+  if (empty()) {
+    contents_.MaybeRemoveEmptyCrcNode();
+    CordRep* rep = CordRepCrc::New(nullptr, std::move(state));
+    contents_.EmplaceTree(rep, method);
+  } else if (!contents_.is_tree()) {
+    CordRep* rep = contents_.MakeFlatWithExtraCapacity(0);
+    rep = CordRepCrc::New(rep, std::move(state));
+    contents_.EmplaceTree(rep, method);
+  } else {
+    const CordzUpdateScope scope(contents_.data_.cordz_info(), method);
+    CordRep* rep = CordRepCrc::New(contents_.data_.as_tree(), std::move(state));
+    contents_.SetTree(rep, scope);
+  }
+}
+
+void Cord::SetExpectedChecksum(uint32_t crc) {
+  // Construct a CrcCordState with a single chunk.
+  crc_internal::CrcCordState state;
+  state.mutable_rep()->prefix_crc.push_back(
+      crc_internal::CrcCordState::PrefixCrc(size(), absl::crc32c_t{crc}));
+  SetCrcCordState(std::move(state));
+}
+
+const crc_internal::CrcCordState* Cord::MaybeGetCrcCordState() const {
+  if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
+    return nullptr;
+  }
+  return &contents_.tree()->crc()->crc_cord_state;
+}
+
+absl::optional<uint32_t> Cord::ExpectedChecksum() const {
+  if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
+    return absl::nullopt;
+  }
+  return static_cast<uint32_t>(
+      contents_.tree()->crc()->crc_cord_state.Checksum());
+}
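A brief usage sketch of the checksum entry points above (the CRC value is made up; wrapper name is ours):

    #include <cstdint>
    #include "absl/strings/cord.h"
    #include "absl/types/optional.h"

    void ChecksumSketch() {
      absl::Cord data("payload");
      data.SetExpectedChecksum(0x89abcdefu);
      absl::optional<uint32_t> crc = data.ExpectedChecksum();  // engaged: 0x89abcdef
    }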
+
 inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size,
                                  size_t size_to_compare) const {
   auto advance = [](Cord::ChunkIterator* it, absl::string_view* chunk) {
@@ -1372,6 +957,7 @@
 }
 
 inline absl::string_view Cord::GetFirstChunk(const Cord& c) {
+  if (c.empty()) return {};
   return c.contents_.FindFlatStartPiece();
 }
 inline absl::string_view Cord::GetFirstChunk(absl::string_view sv) {
@@ -1482,50 +1068,11 @@
   }
 }
 
-Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
-  ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
-                        "Attempted to iterate past `end()`");
-  assert(bytes_remaining_ >= current_chunk_.size());
-  bytes_remaining_ -= current_chunk_.size();
-
-  if (stack_of_right_children_.empty()) {
-    assert(!current_chunk_.empty());  // Called on invalid iterator.
-    // We have reached the end of the Cord.
-    return *this;
-  }
-
-  // Process the next node on the stack.
-  CordRep* node = stack_of_right_children_.back();
-  stack_of_right_children_.pop_back();
-
-  // Walk down the left branches until we hit a non-CONCAT node. Save the
-  // right children to the stack for subsequent traversal.
-  while (node->tag == CONCAT) {
-    stack_of_right_children_.push_back(node->concat()->right);
-    node = node->concat()->left;
-  }
-
-  // Get the child node if we encounter a SUBSTRING.
-  size_t offset = 0;
-  size_t length = node->length;
-  if (node->tag == SUBSTRING) {
-    offset = node->substring()->start;
-    node = node->substring()->child;
-  }
-
-  assert(node->tag == EXTERNAL || node->tag >= FLAT);
-  assert(length != 0);
-  const char* data =
-      node->tag == EXTERNAL ? node->external()->base : node->data;
-  current_chunk_ = absl::string_view(data + offset, length);
-  current_leaf_ = node;
-  return *this;
-}
-
 Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
   ABSL_HARDENING_ASSERT(bytes_remaining_ >= n &&
                         "Attempted to iterate past `end()`");
   Cord subcord;
+  auto constexpr method = CordzUpdateTracker::kCordReader;
 
   if (n <= InlineRep::kMaxInline) {
     // Range to read fits in inline data. Flatten it.
@@ -1544,156 +1091,52 @@
     }
     return subcord;
   }
-  if (n < current_chunk_.size()) {
-    // Range to read is a proper subrange of the current chunk.
-    assert(current_leaf_ != nullptr);
-    CordRep* subnode = Ref(current_leaf_);
-    const char* data =
-        subnode->tag == EXTERNAL ? subnode->external()->base : subnode->data;
-    subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
-    subcord.contents_.set_tree(VerifyTree(subnode));
-    RemoveChunkPrefix(n);
+
+  if (btree_reader_) {
+    size_t chunk_size = current_chunk_.size();
+    if (n <= chunk_size && n <= kMaxBytesToCopy) {
+      subcord = Cord(current_chunk_.substr(0, n), method);
+      if (n < chunk_size) {
+        current_chunk_.remove_prefix(n);
+      } else {
+        current_chunk_ = btree_reader_.Next();
+      }
+    } else {
+      CordRep* rep;
+      current_chunk_ = btree_reader_.Read(n, chunk_size, rep);
+      subcord.contents_.EmplaceTree(rep, method);
+    }
+    bytes_remaining_ -= n;
     return subcord;
   }
 
-  // Range to read begins with a proper subrange of the current chunk.
-  assert(!current_chunk_.empty());
+  // Short circuit if reading the entire data edge.
   assert(current_leaf_ != nullptr);
-  CordRep* subnode = Ref(current_leaf_);
-  if (current_chunk_.size() < subnode->length) {
-    const char* data =
-        subnode->tag == EXTERNAL ? subnode->external()->base : subnode->data;
-    subnode = NewSubstring(subnode, current_chunk_.data() - data,
-                           current_chunk_.size());
-  }
-  n -= current_chunk_.size();
-  bytes_remaining_ -= current_chunk_.size();
-
-  // Process the next node(s) on the stack, reading whole subtrees depending on
-  // their length and how many bytes we are advancing.
-  CordRep* node = nullptr;
-  while (!stack_of_right_children_.empty()) {
-    node = stack_of_right_children_.back();
-    stack_of_right_children_.pop_back();
-    if (node->length > n) break;
-    // TODO(qrczak): This might unnecessarily recreate existing concat nodes.
-    // Avoiding that would need pretty complicated logic (instead of
-    // current_leaf_, keep current_subtree_ which points to the highest node
-    // such that the current leaf can be found on the path of left children
-    // starting from current_subtree_; delay creating subnode while node is
-    // below current_subtree_; find the proper node along the path of left
-    // children starting from current_subtree_ if this loop exits while staying
-    // below current_subtree_; etc.; alternatively, push parents instead of
-    // right children on the stack).
-    subnode = Concat(subnode, Ref(node));
-    n -= node->length;
-    bytes_remaining_ -= node->length;
-    node = nullptr;
-  }
-
-  if (node == nullptr) {
-    // We have reached the end of the Cord.
-    assert(bytes_remaining_ == 0);
-    subcord.contents_.set_tree(VerifyTree(subnode));
+  if (n == current_leaf_->length) {
+    bytes_remaining_ = 0;
+    current_chunk_ = {};
+    CordRep* tree = CordRep::Ref(current_leaf_);
+    subcord.contents_.EmplaceTree(VerifyTree(tree), method);
     return subcord;
   }
 
-  // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
-  // right children to the stack for subsequent traversal.
-  while (node->tag == CONCAT) {
-    if (node->concat()->left->length > n) {
-      // Push right, descend left.
-      stack_of_right_children_.push_back(node->concat()->right);
-      node = node->concat()->left;
-    } else {
-      // Read left, descend right.
-      subnode = Concat(subnode, Ref(node->concat()->left));
-      n -= node->concat()->left->length;
-      bytes_remaining_ -= node->concat()->left->length;
-      node = node->concat()->right;
-    }
-  }
+  // From this point on, we need a partial substring node.
+  // Get pointer to the underlying flat or external data payload and
+  // compute data pointer and offset into current flat or external.
+  CordRep* payload = current_leaf_->IsSubstring()
+                         ? current_leaf_->substring()->child
+                         : current_leaf_;
+  const char* data = payload->IsExternal() ? payload->external()->base
+                                           : payload->flat()->Data();
+  const size_t offset = static_cast<size_t>(current_chunk_.data() - data);
 
-  // Get the child node if we encounter a SUBSTRING.
-  size_t offset = 0;
-  size_t length = node->length;
-  if (node->tag == SUBSTRING) {
-    offset = node->substring()->start;
-    node = node->substring()->child;
-  }
-
-  // Range to read ends with a proper (possibly empty) subrange of the current
-  // chunk.
-  assert(node->tag == EXTERNAL || node->tag >= FLAT);
-  assert(length > n);
-  if (n > 0) subnode = Concat(subnode, NewSubstring(Ref(node), offset, n));
-  const char* data =
-      node->tag == EXTERNAL ? node->external()->base : node->data;
-  current_chunk_ = absl::string_view(data + offset + n, length - n);
-  current_leaf_ = node;
+  auto* tree = CordRepSubstring::Substring(payload, offset, n);
+  subcord.contents_.EmplaceTree(VerifyTree(tree), method);
   bytes_remaining_ -= n;
-  subcord.contents_.set_tree(VerifyTree(subnode));
+  current_chunk_.remove_prefix(n);
   return subcord;
 }
 
-void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
-  assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`");
-  assert(n >= current_chunk_.size());  // This should only be called when
-                                       // iterating to a new node.
-
-  n -= current_chunk_.size();
-  bytes_remaining_ -= current_chunk_.size();
-
-  // Process the next node(s) on the stack, skipping whole subtrees depending on
-  // their length and how many bytes we are advancing.
-  CordRep* node = nullptr;
-  while (!stack_of_right_children_.empty()) {
-    node = stack_of_right_children_.back();
-    stack_of_right_children_.pop_back();
-    if (node->length > n) break;
-    n -= node->length;
-    bytes_remaining_ -= node->length;
-    node = nullptr;
-  }
-
-  if (node == nullptr) {
-    // We have reached the end of the Cord.
-    assert(bytes_remaining_ == 0);
-    return;
-  }
-
-  // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
-  // right children to the stack for subsequent traversal.
-  while (node->tag == CONCAT) {
-    if (node->concat()->left->length > n) {
-      // Push right, descend left.
-      stack_of_right_children_.push_back(node->concat()->right);
-      node = node->concat()->left;
-    } else {
-      // Skip left, descend right.
-      n -= node->concat()->left->length;
-      bytes_remaining_ -= node->concat()->left->length;
-      node = node->concat()->right;
-    }
-  }
-
-  // Get the child node if we encounter a SUBSTRING.
-  size_t offset = 0;
-  size_t length = node->length;
-  if (node->tag == SUBSTRING) {
-    offset = node->substring()->start;
-    node = node->substring()->child;
-  }
-
-  assert(node->tag == EXTERNAL || node->tag >= FLAT);
-  assert(length > n);
-  const char* data =
-      node->tag == EXTERNAL ? node->external()->base : node->data;
-  current_chunk_ = absl::string_view(data + offset + n, length - n);
-  current_leaf_ = node;
-  bytes_remaining_ -= n;
-}
-
 char Cord::operator[](size_t i) const {
   ABSL_HARDENING_ASSERT(i < size());
   size_t offset = i;
@@ -1701,28 +1144,21 @@
   if (rep == nullptr) {
     return contents_.data()[i];
   }
+  rep = cord_internal::SkipCrcNode(rep);
   while (true) {
     assert(rep != nullptr);
     assert(offset < rep->length);
-    if (rep->tag >= FLAT) {
+    if (rep->IsFlat()) {
       // Get the "i"th character directly from the flat array.
-      return rep->data[offset];
-    } else if (rep->tag == EXTERNAL) {
+      return rep->flat()->Data()[offset];
+    } else if (rep->IsBtree()) {
+      return rep->btree()->GetCharacter(offset);
+    } else if (rep->IsExternal()) {
       // Get the "i"th character from the external array.
       return rep->external()->base[offset];
-    } else if (rep->tag == CONCAT) {
-      // Recursively branch to the side of the concatenation that the "i"th
-      // character is on.
-      size_t left_length = rep->concat()->left->length;
-      if (offset < left_length) {
-        rep = rep->concat()->left;
-      } else {
-        offset -= left_length;
-        rep = rep->concat()->right;
-      }
     } else {
       // This must be a substring node, so bypass it to get to the child.
-      assert(rep->tag == SUBSTRING);
+      assert(rep->IsSubstring());
       offset += rep->substring()->start;
       rep = rep->substring()->child;
     }
@@ -1730,6 +1166,7 @@
 }
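A tiny example of the lookup above (wrapper name is ours):

    #include "absl/strings/cord.h"

    void IndexSketch() {
      absl::Cord c("abcdef");  // short: inline
      char ch = c[3];          // 'd'; read from inline data, or by walking
                               // flat/btree/substring nodes for tree-backed cords
      (void)ch;
    }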
 
 absl::string_view Cord::FlattenSlowPath() {
+  assert(contents_.is_tree());
   size_t total_size = size();
   CordRep* new_rep;
   char* new_buffer;
@@ -1737,9 +1174,9 @@
   // Try to put the contents into a new flat rep. If they won't fit in the
   // biggest possible flat node, use an external rep instead.
   if (total_size <= kMaxFlatLength) {
-    new_rep = NewFlat(total_size);
+    new_rep = CordRepFlat::New(total_size);
     new_rep->length = total_size;
-    new_buffer = new_rep->data;
+    new_buffer = new_rep->flat()->Data();
     CopyToArraySlowPath(new_buffer);
   } else {
     new_buffer = std::allocator<char>().allocate(total_size);
@@ -1750,29 +1187,40 @@
                                             s.size());
         });
   }
-  Unref(contents_.tree());
-  contents_.set_tree(new_rep);
+  CordzUpdateScope scope(contents_.cordz_info(), CordzUpdateTracker::kFlatten);
+  CordRep::Unref(contents_.as_tree());
+  contents_.SetTree(new_rep, scope);
   return absl::string_view(new_buffer, total_size);
 }
 
 /* static */ bool Cord::GetFlatAux(CordRep* rep, absl::string_view* fragment) {
   assert(rep != nullptr);
-  if (rep->tag >= FLAT) {
-    *fragment = absl::string_view(rep->data, rep->length);
+  if (rep->length == 0) {
+    *fragment = absl::string_view();
     return true;
-  } else if (rep->tag == EXTERNAL) {
+  }
+  rep = cord_internal::SkipCrcNode(rep);
+  if (rep->IsFlat()) {
+    *fragment = absl::string_view(rep->flat()->Data(), rep->length);
+    return true;
+  } else if (rep->IsExternal()) {
     *fragment = absl::string_view(rep->external()->base, rep->length);
     return true;
-  } else if (rep->tag == SUBSTRING) {
+  } else if (rep->IsBtree()) {
+    return rep->btree()->IsFlat(fragment);
+  } else if (rep->IsSubstring()) {
     CordRep* child = rep->substring()->child;
-    if (child->tag >= FLAT) {
-      *fragment =
-          absl::string_view(child->data + rep->substring()->start, rep->length);
+    if (child->IsFlat()) {
+      *fragment = absl::string_view(
+          child->flat()->Data() + rep->substring()->start, rep->length);
       return true;
-    } else if (child->tag == EXTERNAL) {
+    } else if (child->IsExternal()) {
       *fragment = absl::string_view(
           child->external()->base + rep->substring()->start, rep->length);
       return true;
+    } else if (child->IsBtree()) {
+      return child->btree()->IsFlat(rep->substring()->start, rep->length,
+                                    fragment);
     }
   }
   return false;
@@ -1782,49 +1230,31 @@
     absl::cord_internal::CordRep* rep,
     absl::FunctionRef<void(absl::string_view)> callback) {
   assert(rep != nullptr);
-  int stack_pos = 0;
-  constexpr int stack_max = 128;
-  // Stack of right branches for tree traversal
-  absl::cord_internal::CordRep* stack[stack_max];
-  absl::cord_internal::CordRep* current_node = rep;
-  while (true) {
-    if (current_node->tag == CONCAT) {
-      if (stack_pos == stack_max) {
-        // There's no more room on our stack array to add another right branch,
-        // and the idea is to avoid allocations, so call this function
-        // recursively to navigate this subtree further.  (This is not something
-        // we expect to happen in practice).
-        ForEachChunkAux(current_node, callback);
+  if (rep->length == 0) return;
+  rep = cord_internal::SkipCrcNode(rep);
 
-        // Pop the next right branch and iterate.
-        current_node = stack[--stack_pos];
-        continue;
-      } else {
-        // Save the right branch for later traversal and continue down the left
-        // branch.
-        stack[stack_pos++] = current_node->concat()->right;
-        current_node = current_node->concat()->left;
-        continue;
-      }
+  if (rep->IsBtree()) {
+    ChunkIterator it(rep), end;
+    while (it != end) {
+      callback(*it);
+      ++it;
     }
-    // This is a leaf node, so invoke our callback.
-    absl::string_view chunk;
-    bool success = GetFlatAux(current_node, &chunk);
-    assert(success);
-    if (success) {
-      callback(chunk);
-    }
-    if (stack_pos == 0) {
-      // end of traversal
-      return;
-    }
-    current_node = stack[--stack_pos];
+    return;
+  }
+
+  // This is a leaf node, so invoke our callback.
+  absl::cord_internal::CordRep* current_node = cord_internal::SkipCrcNode(rep);
+  absl::string_view chunk;
+  bool success = GetFlatAux(current_node, &chunk);
+  assert(success);
+  if (success) {
+    callback(chunk);
   }
 }
 
-static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) {
+static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
+                     int indent) {
   const int kIndentStep = 1;
-  int indent = 0;
   absl::InlinedVector<CordRep*, kInlinedVectorSize> stack;
   absl::InlinedVector<int, kInlinedVectorSize> indents;
   for (;;) {
@@ -1833,30 +1263,36 @@
     *os << " [";
     if (include_data) *os << static_cast<void*>(rep);
     *os << "]";
-    *os << " " << (IsRootBalanced(rep) ? 'b' : 'u');
     *os << " " << std::setw(indent) << "";
-    if (rep->tag == CONCAT) {
-      *os << "CONCAT depth=" << Depth(rep) << "\n";
+    bool leaf = false;
+    if (rep == nullptr) {
+      *os << "NULL\n";
+      leaf = true;
+    } else if (rep->IsCrc()) {
+      *os << "CRC crc=" << rep->crc()->crc_cord_state.Checksum() << "\n";
       indent += kIndentStep;
-      indents.push_back(indent);
-      stack.push_back(rep->concat()->right);
-      rep = rep->concat()->left;
-    } else if (rep->tag == SUBSTRING) {
+      rep = rep->crc()->child;
+    } else if (rep->IsSubstring()) {
       *os << "SUBSTRING @ " << rep->substring()->start << "\n";
       indent += kIndentStep;
       rep = rep->substring()->child;
-    } else {  // Leaf
-      if (rep->tag == EXTERNAL) {
+    } else {  // Leaf or ring
+      leaf = true;
+      if (rep->IsExternal()) {
         *os << "EXTERNAL [";
         if (include_data)
           *os << absl::CEscape(std::string(rep->external()->base, rep->length));
         *os << "]\n";
-      } else {
-        *os << "FLAT cap=" << TagToLength(rep->tag) << " [";
+      } else if (rep->IsFlat()) {
+        *os << "FLAT cap=" << rep->flat()->Capacity() << " [";
         if (include_data)
-          *os << absl::CEscape(std::string(rep->data, rep->length));
+          *os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
         *os << "]\n";
+      } else {
+        CordRepBtree::Dump(rep, /*label=*/"", include_data, *os);
       }
+    }
+    if (leaf) {
       if (stack.empty()) break;
       rep = stack.back();
       stack.pop_back();
@@ -1875,7 +1311,7 @@
 }
 
 static bool VerifyNode(CordRep* root, CordRep* start_node,
-                       bool full_validation) {
+                       bool /* full_validation */) {
   absl::InlinedVector<CordRep*, 2> worklist;
   worklist.push_back(start_node);
   do {
@@ -1885,108 +1321,53 @@
     ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node));
     if (node != root) {
       ABSL_INTERNAL_CHECK(node->length != 0, ReportError(root, node));
+      ABSL_INTERNAL_CHECK(!node->IsCrc(), ReportError(root, node));
     }
 
-    if (node->tag == CONCAT) {
-      ABSL_INTERNAL_CHECK(node->concat()->left != nullptr,
+    if (node->IsFlat()) {
+      ABSL_INTERNAL_CHECK(node->length <= node->flat()->Capacity(),
                           ReportError(root, node));
-      ABSL_INTERNAL_CHECK(node->concat()->right != nullptr,
-                          ReportError(root, node));
-      ABSL_INTERNAL_CHECK((node->length == node->concat()->left->length +
-                                               node->concat()->right->length),
-                          ReportError(root, node));
-      if (full_validation) {
-        worklist.push_back(node->concat()->right);
-        worklist.push_back(node->concat()->left);
-      }
-    } else if (node->tag >= FLAT) {
-      ABSL_INTERNAL_CHECK(node->length <= TagToLength(node->tag),
-                          ReportError(root, node));
-    } else if (node->tag == EXTERNAL) {
+    } else if (node->IsExternal()) {
       ABSL_INTERNAL_CHECK(node->external()->base != nullptr,
                           ReportError(root, node));
-    } else if (node->tag == SUBSTRING) {
+    } else if (node->IsSubstring()) {
       ABSL_INTERNAL_CHECK(
           node->substring()->start < node->substring()->child->length,
           ReportError(root, node));
       ABSL_INTERNAL_CHECK(node->substring()->start + node->length <=
                               node->substring()->child->length,
                           ReportError(root, node));
+    } else if (node->IsCrc()) {
+      ABSL_INTERNAL_CHECK(
+          node->crc()->child != nullptr || node->crc()->length == 0,
+          ReportError(root, node));
+      if (node->crc()->child != nullptr) {
+        ABSL_INTERNAL_CHECK(node->crc()->length == node->crc()->child->length,
+                            ReportError(root, node));
+        worklist.push_back(node->crc()->child);
+      }
     }
   } while (!worklist.empty());
   return true;
 }
 
-// Traverses the tree and computes the total memory allocated.
-/* static */ size_t Cord::MemoryUsageAux(const CordRep* rep) {
-  size_t total_mem_usage = 0;
-
-  // Allow a quick exit for the common case that the root is a leaf.
-  if (RepMemoryUsageLeaf(rep, &total_mem_usage)) {
-    return total_mem_usage;
-  }
-
-  // Iterate over the tree. cur_node is never a leaf node and leaf nodes will
-  // never be appended to tree_stack. This reduces overhead from manipulating
-  // tree_stack.
-  absl::InlinedVector<const CordRep*, kInlinedVectorSize> tree_stack;
-  const CordRep* cur_node = rep;
-  while (true) {
-    const CordRep* next_node = nullptr;
-
-    if (cur_node->tag == CONCAT) {
-      total_mem_usage += sizeof(CordRepConcat);
-      const CordRep* left = cur_node->concat()->left;
-      if (!RepMemoryUsageLeaf(left, &total_mem_usage)) {
-        next_node = left;
-      }
-
-      const CordRep* right = cur_node->concat()->right;
-      if (!RepMemoryUsageLeaf(right, &total_mem_usage)) {
-        if (next_node) {
-          tree_stack.push_back(next_node);
-        }
-        next_node = right;
-      }
-    } else {
-      // Since cur_node is not a leaf or a concat node it must be a substring.
-      assert(cur_node->tag == SUBSTRING);
-      total_mem_usage += sizeof(CordRepSubstring);
-      next_node = cur_node->substring()->child;
-      if (RepMemoryUsageLeaf(next_node, &total_mem_usage)) {
-        next_node = nullptr;
-      }
-    }
-
-    if (!next_node) {
-      if (tree_stack.empty()) {
-        return total_mem_usage;
-      }
-      next_node = tree_stack.back();
-      tree_stack.pop_back();
-    }
-    cur_node = next_node;
-  }
-}
-
 std::ostream& operator<<(std::ostream& out, const Cord& cord) {
   for (absl::string_view chunk : cord.Chunks()) {
-    out.write(chunk.data(), chunk.size());
+    out.write(chunk.data(), static_cast<std::streamsize>(chunk.size()));
   }
   return out;
 }
 
 namespace strings_internal {
-size_t CordTestAccess::FlatOverhead() { return kFlatOverhead; }
-size_t CordTestAccess::MaxFlatLength() { return kMaxFlatLength; }
+size_t CordTestAccess::FlatOverhead() { return cord_internal::kFlatOverhead; }
+size_t CordTestAccess::MaxFlatLength() { return cord_internal::kMaxFlatLength; }
 size_t CordTestAccess::FlatTagToLength(uint8_t tag) {
-  return TagToLength(tag);
+  return cord_internal::TagToLength(tag);
 }
 uint8_t CordTestAccess::LengthToTag(size_t s) {
   ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, absl::StrCat("Invalid length ", s));
-  return AllocatedSizeToTag(s + kFlatOverhead);
+  return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
 }
-size_t CordTestAccess::SizeofCordRepConcat() { return sizeof(CordRepConcat); }
 size_t CordTestAccess::SizeofCordRepExternal() {
   return sizeof(CordRepExternal);
 }
diff --git a/abseil-cpp/absl/strings/cord.h b/abseil-cpp/absl/strings/cord.h
index b8b251b..457ccf0 100644
--- a/abseil-cpp/absl/strings/cord.h
+++ b/abseil-cpp/absl/strings/cord.h
@@ -20,12 +20,11 @@
 // structure. A Cord is a string-like sequence of characters optimized for
 // specific use cases. Unlike a `std::string`, which stores an array of
 // contiguous characters, Cord data is stored in a structure consisting of
-// separate, reference-counted "chunks." (Currently, this implementation is a
-// tree structure, though that implementation may change.)
+// separate, reference-counted "chunks."
 //
 // Because a Cord consists of these chunks, data can be added to or removed from
 // a Cord during its lifetime. Chunks may also be shared between Cords. Unlike a
-// `std::string`, a Cord can therefore accomodate data that changes over its
+// `std::string`, a Cord can therefore accommodate data that changes over its
 // lifetime, though it's not quite "mutable"; it can change only in the
 // attachment, detachment, or rearrangement of chunks of its constituent data.
 //
@@ -70,15 +69,31 @@
 #include <string>
 #include <type_traits>
 
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/macros.h"
 #include "absl/base/port.h"
 #include "absl/container/inlined_vector.h"
+#include "absl/crc/internal/crc_cord_state.h"
 #include "absl/functional/function_ref.h"
 #include "absl/meta/type_traits.h"
+#include "absl/strings/cord_analysis.h"
+#include "absl/strings/cord_buffer.h"
+#include "absl/strings/internal/cord_data_edge.h"
 #include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_btree_reader.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/internal/cordz_functions.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_scope.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
 #include "absl/strings/internal/resize_uninitialized.h"
+#include "absl/strings/internal/string_constant.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 
@@ -90,6 +105,41 @@
 Cord MakeCordFromExternal(absl::string_view, Releaser&&);
 void CopyCordToString(const Cord& src, std::string* dst);
 
+// Cord memory accounting modes
+enum class CordMemoryAccounting {
+  // Counts the *approximate* number of bytes held in full or in part by this
+  // Cord (which may not remain the same between invocations). Cords that share
+  // memory could each be "charged" independently for the same shared memory.
+  // See also comment on `kTotalMorePrecise` on internally shared memory.
+  kTotal,
+
+  // Counts the *approximate* number of bytes of distinct memory held in full
+  // or in part by this Cord. This option is similar
+  // to `kTotal`, except that if the cord has multiple references to the same
+  // memory, that memory is only counted once.
+  //
+  // For example:
+  //   absl::Cord cord;
+  //   cord.Append(some_other_cord);
+  //   cord.Append(some_other_cord);
+  //   // Counts `some_other_cord` twice:
+  //   cord.EstimatedMemoryUsage(CordMemoryAccounting::kTotal);
+  //   // Counts `some_other_cord` once:
+  //   cord.EstimatedMemoryUsage(CordMemoryAccounting::kTotalMorePrecise);
+  //
+  // The `kTotalMorePrecise` number is more expensive to compute as it requires
+  // deduplicating all memory references. Applications should prefer to use
+  // `kFairShare` or `kTotal` unless they really need a more precise estimate
+  // on "how much memory is potentially held / kept alive by this cord?"
+  kTotalMorePrecise,
+
+  // Counts the *approximate* number of bytes held in full or in part by this
+  // Cord weighted by the sharing ratio of that data. For example, if some data
+  // edge is shared by 4 different Cords, then each cord is attributed 1/4th of
+  // the total memory usage as a 'fair share' of the total memory usage.
+  kFairShare,
+};
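
A rough illustration of how the three accounting modes above diverge when a cord shares subtrees with itself (a sketch assuming only the documented semantics; the exact byte counts are implementation details):

    #include <iostream>
    #include <string>
    #include "absl/strings/cord.h"

    void CompareAccountingModes() {
      absl::Cord shared(std::string(4096, 'x'));
      absl::Cord cord;
      cord.Append(shared);
      cord.Append(shared);  // the same underlying tree is now referenced twice
      // kTotal may charge the shared bytes twice, kTotalMorePrecise counts
      // distinct memory once, kFairShare weights memory by its sharing ratio.
      std::cout << cord.EstimatedMemoryUsage(absl::CordMemoryAccounting::kTotal)
                << " "
                << cord.EstimatedMemoryUsage(
                       absl::CordMemoryAccounting::kTotalMorePrecise)
                << " "
                << cord.EstimatedMemoryUsage(
                       absl::CordMemoryAccounting::kFairShare)
                << "\n";
    }
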
+
 // Cord
 //
 // A Cord is a sequence of characters, designed to be more efficient than a
@@ -204,7 +254,7 @@
   //
   // Releases the Cord data. Any nodes that share data with other Cords, if
   // applicable, will have their reference counts reduced by 1.
-  void Clear();
+  ABSL_ATTRIBUTE_REINITIALIZES void Clear();
 
   // Cord::Append()
   //
@@ -216,6 +266,58 @@
   template <typename T, EnableIfString<T> = 0>
   void Append(T&& src);
 
+  // Appends `buffer` to this cord, unless `buffer` has a zero length, in which
+  // case this method has no effect on this cord instance.
+  // This method is guaranteed to consume `buffer`.
+  void Append(CordBuffer buffer);
+
+  // Returns a CordBuffer, reusing any existing spare capacity in this cord.
+  //
+  // Cord instances may have additional unused capacity in the last (or first)
+  // nodes of the underlying tree to facilitate amortized growth. This method
+  // allows applications to explicitly use this spare capacity if available,
+  // or create a new CordBuffer instance otherwise.
+  // If this cord has a final non-shared node with at least `min_capacity`
+  // available, then this method will return that buffer including its data
+  // contents. That is, the returned buffer will have a non-zero length, and
+  // a capacity of at least `buffer.length + min_capacity`. Otherwise, this
+  // method will return `CordBuffer::CreateWithDefaultLimit(capacity)`.
+  //
+  // Below is an example of using GetAppendBuffer. Notice that in this example
+  // we use `GetAppendBuffer()` only on the first iteration: since we know
+  // nothing about any initial extra capacity in `cord`, we may be able to use
+  // that capacity. After the first iteration we append buffers whose contents
+  // are fully utilized, so we avoid calling `GetAppendBuffer()` again; doing
+  // so would work, but would trigger an unnecessary inspection of the cord's
+  // contents:
+  //
+  //   void AppendRandomDataToCord(absl::Cord &cord, size_t n) {
+  //     bool first = true;
+  //     while (n > 0) {
+  //       CordBuffer buffer = first ? cord.GetAppendBuffer(n)
+  //                                 : CordBuffer::CreateWithDefaultLimit(n);
+  //       absl::Span<char> data = buffer.available_up_to(n);
+  //       FillRandomValues(data.data(), data.size());
+  //       buffer.IncreaseLengthBy(data.size());
+  //       cord.Append(std::move(buffer));
+  //       n -= data.size();
+  //       first = false;
+  //     }
+  //   }
+  CordBuffer GetAppendBuffer(size_t capacity, size_t min_capacity = 16);
+
+  // Returns a CordBuffer, reusing any existing spare capacity in this cord.
+  //
+  // This function is identical to `GetAppendBuffer`, except that in the case
+  // where a new `CordBuffer` is allocated, it is allocated using the provided
+  // custom limit instead of the default limit. `GetAppendBuffer` will default
+  // to `CordBuffer::CreateWithDefaultLimit(capacity)` whereas this method
+  // will default to `CordBuffer::CreateWithCustomLimit(block_size, capacity)`.
+  // This method is equivalent to `GetAppendBuffer` if `block_size` is zero.
+  // See the documentation for `CreateWithCustomLimit` for more details on the
+  // restrictions and legal values for `block_size`.
+  CordBuffer GetCustomAppendBuffer(size_t block_size, size_t capacity,
+                                   size_t min_capacity = 16);
+
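
A small usage sketch for the buffer-based append API above (sizes are illustrative; block_size must satisfy the CreateWithCustomLimit restrictions mentioned above):

    #include <cstring>
    #include <utility>
    #include "absl/strings/cord.h"
    #include "absl/strings/cord_buffer.h"
    #include "absl/types/span.h"

    void AppendZeroedBlock(absl::Cord& cord) {
      // Reuse spare capacity in the cord's last node if any, else get a new
      // 64 KiB block.
      absl::CordBuffer buffer = cord.GetCustomAppendBuffer(
          /*block_size=*/64 << 10, /*capacity=*/64 << 10);
      absl::Span<char> space = buffer.available();
      memset(space.data(), 0, space.size());
      buffer.IncreaseLengthBy(space.size());
      cord.Append(std::move(buffer));
    }
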
   // Cord::Prepend()
   //
   // Prepends data to the Cord, which may come from another Cord or other string
@@ -225,6 +327,11 @@
   template <typename T, EnableIfString<T> = 0>
   void Prepend(T&& src);
 
+  // Prepends `buffer` to this cord, unless `buffer` has a zero length, in which
+  // case this method has no effect on this cord instance.
+  // This method is guaranteed to consume `buffer`.
+  void Prepend(CordBuffer buffer);
+
   // Cord::RemovePrefix()
   //
   // Removes the first `n` bytes of a Cord.
@@ -246,9 +353,7 @@
   // swap()
   //
   // Swaps the contents of two Cords.
-  friend void swap(Cord& x, Cord& y) noexcept {
-    x.swap(y);
-  }
+  friend void swap(Cord& x, Cord& y) noexcept { x.swap(y); }
 
   // Cord::size()
   //
@@ -262,11 +367,10 @@
 
   // Cord::EstimatedMemoryUsage()
   //
-  // Returns the *approximate* number of bytes held in full or in part by this
-  // Cord (which may not remain the same between invocations).  Note that Cords
-  // that share memory could each be "charged" independently for the same shared
-  // memory.
-  size_t EstimatedMemoryUsage() const;
+  // Returns the *approximate* number of bytes held by this cord.
+  // See CordMemoryAccounting for more information on the accounting method.
+  size_t EstimatedMemoryUsage(CordMemoryAccounting accounting_method =
+                                  CordMemoryAccounting::kTotal) const;
 
   // Cord::Compare()
   //
@@ -286,7 +390,7 @@
   bool StartsWith(const Cord& rhs) const;
   bool StartsWith(absl::string_view rhs) const;
 
-  // Cord::EndsWidth()
+  // Cord::EndsWith()
   //
   // Determines whether the Cord ends with the passed string data `rhs`.
   bool EndsWith(absl::string_view rhs) const;
@@ -316,7 +420,7 @@
   //----------------------------------------------------------------------------
   //
   // A `Cord::ChunkIterator` allows iteration over the constituent chunks of its
-  // Cord. Such iteration allows you to perform non-const operatons on the data
+  // Cord. Such iteration allows you to perform non-const operations on the data
   // of a Cord without modifying it.
   //
   // Generally, you do not instantiate a `Cord::ChunkIterator` directly;
@@ -360,17 +464,28 @@
     friend class CharIterator;
 
    private:
+    using CordRep = absl::cord_internal::CordRep;
+    using CordRepBtree = absl::cord_internal::CordRepBtree;
+    using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader;
+
+    // Constructs a `begin()` iterator from `tree`. `tree` must not be null.
+    explicit ChunkIterator(cord_internal::CordRep* tree);
+
     // Constructs a `begin()` iterator from `cord`.
     explicit ChunkIterator(const Cord* cord);
 
+    // Initializes this instance from a tree. Invoked by constructors.
+    void InitTree(cord_internal::CordRep* tree);
+
     // Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than
     // `current_chunk_.size()`.
     void RemoveChunkPrefix(size_t n);
     Cord AdvanceAndReadBytes(size_t n);
     void AdvanceBytes(size_t n);
-    // Iterates `n` bytes, where `n` is expected to be greater than or equal to
-    // `current_chunk_.size()`.
-    void AdvanceBytesSlowPath(size_t n);
+
+    // Btree specific operator++
+    ChunkIterator& AdvanceBtree();
+    void AdvanceBytesBtree(size_t n);
 
     // A view into bytes of the current `CordRep`. It may only be a view to a
     // suffix of bytes if this is being used by `CharIterator`.
@@ -381,11 +496,12 @@
     absl::cord_internal::CordRep* current_leaf_ = nullptr;
     // The number of bytes left in the `Cord` over which we are iterating.
     size_t bytes_remaining_ = 0;
-    absl::InlinedVector<absl::cord_internal::CordRep*, 4>
-        stack_of_right_children_;
+
+    // Cord reader for cord btrees. Empty if not traversing a btree.
+    CordRepBtreeReader btree_reader_;
   };
 
-  // Cord::ChunkIterator::chunk_begin()
+  // Cord::chunk_begin()
   //
   // Returns an iterator to the first chunk of the `Cord`.
   //
@@ -401,7 +517,7 @@
   //   }
   ChunkIterator chunk_begin() const;
 
-  // Cord::ChunkItertator::chunk_end()
+  // Cord::chunk_end()
   //
   // Returns an iterator one increment past the last chunk of the `Cord`.
   //
@@ -411,7 +527,7 @@
   ChunkIterator chunk_end() const;
 
   //----------------------------------------------------------------------------
-  // Cord::ChunkIterator::ChunkRange
+  // Cord::ChunkRange
   //----------------------------------------------------------------------------
   //
   // `ChunkRange` is a helper class for iterating over the chunks of the `Cord`,
@@ -424,6 +540,16 @@
   // `Cord::chunk_begin()` and `Cord::chunk_end()`.
   class ChunkRange {
    public:
+    // Fulfill minimum C++ container requirements [container.requirements].
+    // These (partial) container type definitions allow ChunkRange to be used
+    // in various utilities expecting a subset of [container.requirements].
+    // For example, they enable using `::testing::ElementsAre(...)`.
+    using value_type = absl::string_view;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using iterator = ChunkIterator;
+    using const_iterator = ChunkIterator;
+
     explicit ChunkRange(const Cord* cord) : cord_(cord) {}
 
     ChunkIterator begin() const;
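
A hedged sketch of what these container typedefs enable, assuming gtest/gmock is available (not part of this patch):

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"
    #include "absl/strings/cord.h"

    TEST(CordChunkRange, WorksWithElementsAre) {
      absl::Cord cord("hello");  // short cords are stored inline as one chunk
      EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("hello"));
    }
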
@@ -435,9 +561,9 @@
 
   // Cord::Chunks()
   //
-  // Returns a `Cord::ChunkIterator::ChunkRange` for iterating over the chunks
-  // of a `Cord` with a range-based for-loop. For most iteration tasks on a
-  // Cord, use `Cord::Chunks()` to retrieve this iterator.
+  // Returns a `Cord::ChunkRange` for iterating over the chunks of a `Cord` with
+  // a range-based for-loop. For most iteration tasks on a Cord, use
+  // `Cord::Chunks()` to retrieve this iterator.
   //
   // Example:
   //
@@ -503,7 +629,7 @@
     ChunkIterator chunk_iterator_;
   };
 
-  // Cord::CharIterator::AdvanceAndRead()
+  // Cord::AdvanceAndRead()
   //
   // Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes
   // advanced as a separate `Cord`. `n_bytes` must be less than or equal to the
@@ -511,21 +637,21 @@
   // valid to pass `char_end()` and `0`.
   static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes);
 
-  // Cord::CharIterator::Advance()
+  // Cord::Advance()
   //
   // Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than
   // or equal to the number of bytes remaining within the Cord; otherwise,
   // behavior is undefined. It is valid to pass `char_end()` and `0`.
   static void Advance(CharIterator* it, size_t n_bytes);
 
-  // Cord::CharIterator::ChunkRemaining()
+  // Cord::ChunkRemaining()
   //
   // Returns the longest contiguous view starting at the iterator's position.
   //
   // `it` must be dereferenceable.
   static absl::string_view ChunkRemaining(const CharIterator& it);
 
-  // Cord::CharIterator::char_begin()
+  // Cord::char_begin()
   //
   // Returns an iterator to the first character of the `Cord`.
   //
@@ -534,7 +660,7 @@
   // a `CharIterator` where range-based for-loops may not be available.
   CharIterator char_begin() const;
 
-  // Cord::CharIterator::char_end()
+  // Cord::char_end()
   //
   // Returns an iterator to one past the last character of the `Cord`.
   //
@@ -543,18 +669,28 @@
   // a `CharIterator` where range-based for-loops are not useful.
   CharIterator char_end() const;
 
-  // Cord::CharIterator::CharRange
+  // Cord::CharRange
   //
   // `CharRange` is a helper class for iterating over the characters of a Cord,
   // producing an iterator which can be used within a range-based for loop.
   // Construction of a `CharRange` will return an iterator pointing to the first
   // character of the Cord. Generally, do not construct a `CharRange` directly;
-  // instead, prefer to use the `Cord::Chars()` method show below.
+  // instead, prefer to use the `Cord::Chars()` method shown below.
   //
   // Implementation note: `CharRange` is simply a convenience wrapper over
   // `Cord::char_begin()` and `Cord::char_end()`.
   class CharRange {
    public:
+    // Fulfill minimum C++ container requirements [container.requirements].
+    // These (partial) container type definitions allow CharRange to be used
+    // in various utilities expecting a subset of [container.requirements].
+    // For example, they enable using `::testing::ElementsAre(...)`.
+    using value_type = char;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using iterator = CharIterator;
+    using const_iterator = CharIterator;
+
     explicit CharRange(const Cord* cord) : cord_(cord) {}
 
     CharIterator begin() const;
@@ -564,11 +700,11 @@
     const Cord* cord_;
   };
 
-  // Cord::CharIterator::Chars()
+  // Cord::Chars()
   //
-  // Returns a `Cord::CharIterator` for iterating over the characters of a
-  // `Cord` with a range-based for-loop. For most character-based iteration
-  // tasks on a Cord, use `Cord::Chars()` to retrieve this iterator.
+  // Returns a `Cord::CharRange` for iterating over the characters of a `Cord`
+  // with a range-based for-loop. For most character-based iteration tasks on a
+  // Cord, use `Cord::Chars()` to retrieve this iterator.
   //
   // Example:
   //
@@ -615,6 +751,29 @@
     cord->Append(part);
   }
 
+  // Cord::SetExpectedChecksum()
+  //
+  // Stores a checksum value with this non-empty cord instance, for later
+  // retrieval.
+  //
+  // The expected checksum is a number stored out-of-band, alongside the data.
+  // It is preserved across copies and assignments, but any mutations to a cord
+  // will cause it to lose its expected checksum.
+  //
+  // The expected checksum is not part of a Cord's value, and does not affect
+  // operations such as equality or hashing.
+  //
+  // This field is intended to store a CRC32C checksum for later validation, to
+  // help support end-to-end checksum workflows.  However, the Cord API itself
+  // does no CRC validation, and assigns no meaning to this number.
+  //
+  // This call has no effect if this cord is empty.
+  void SetExpectedChecksum(uint32_t crc);
+
+  // Returns this cord's expected checksum, if it has one.  Otherwise, returns
+  // nullopt.
+  absl::optional<uint32_t> ExpectedChecksum() const;
+
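
A minimal sketch of the checksum round trip described above (the checksum value is illustrative; the Cord API assigns it no meaning):

    #include <cassert>
    #include <cstdint>
    #include "absl/strings/cord.h"

    void ChecksumRoundTrip() {
      absl::Cord cord("payload");
      cord.SetExpectedChecksum(0xDEADBEEF);  // stored out-of-band with the data
      assert(cord.ExpectedChecksum().value_or(0) == 0xDEADBEEF);
      cord.Append("more data");              // any mutation drops the checksum
      assert(!cord.ExpectedChecksum().has_value());
    }
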
   template <typename H>
   friend H AbslHashValue(H hash_state, const absl::Cord& c) {
     absl::optional<absl::string_view> maybe_flat = c.TryFlat();
@@ -624,11 +783,34 @@
     return c.HashFragmented(std::move(hash_state));
   }
 
+  // Create a Cord with the contents of StringConstant<T>::value.
+  // No allocations will be done and no data will be copied.
+  // This is an INTERNAL API and subject to change or removal. This API can only
+  // be used by spelling absl::strings_internal::MakeStringConstant, which is
+  // also an internal API.
+  template <typename T>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  constexpr Cord(strings_internal::StringConstant<T>);
+
  private:
+  using CordRep = absl::cord_internal::CordRep;
+  using CordRepFlat = absl::cord_internal::CordRepFlat;
+  using CordzInfo = cord_internal::CordzInfo;
+  using CordzUpdateScope = cord_internal::CordzUpdateScope;
+  using CordzUpdateTracker = cord_internal::CordzUpdateTracker;
+  using InlineData = cord_internal::InlineData;
+  using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
+
+  // Creates a cord instance with `method` representing the originating
+  // public API call causing the cord to be created.
+  explicit Cord(absl::string_view src, MethodIdentifier method);
+
   friend class CordTestPeer;
   friend bool operator==(const Cord& lhs, const Cord& rhs);
   friend bool operator==(const Cord& lhs, absl::string_view rhs);
 
+  friend const CordzInfo* GetCordzInfoForTesting(const Cord& cord);
+
   // Calls the provided function once for each cord chunk, in order.  Unlike
   // Chunks(), this API will not allocate memory.
   void ForEachChunk(absl::FunctionRef<void(absl::string_view)>) const;
@@ -644,96 +826,138 @@
   // InlineRep holds either a tree pointer, or an array of kMaxInline bytes.
   class InlineRep {
    public:
-    static constexpr unsigned char kMaxInline = 15;
+    static constexpr unsigned char kMaxInline = cord_internal::kMaxInline;
     static_assert(kMaxInline >= sizeof(absl::cord_internal::CordRep*), "");
-    // Tag byte & kMaxInline means we are storing a pointer.
-    static constexpr unsigned char kTreeFlag = 1 << 4;
-    // Tag byte & kProfiledFlag means we are profiling the Cord.
-    static constexpr unsigned char kProfiledFlag = 1 << 5;
 
-    constexpr InlineRep() : data_{} {}
+    constexpr InlineRep() : data_() {}
+    explicit InlineRep(InlineData::DefaultInitType init) : data_(init) {}
     InlineRep(const InlineRep& src);
     InlineRep(InlineRep&& src);
     InlineRep& operator=(const InlineRep& src);
     InlineRep& operator=(InlineRep&& src) noexcept;
 
+    explicit constexpr InlineRep(absl::string_view sv, CordRep* rep);
+
     void Swap(InlineRep* rhs);
     bool empty() const;
     size_t size() const;
     const char* data() const;  // Returns nullptr if holding pointer
-    void set_data(const char* data, size_t n,
-                  bool nullify_tail);  // Discards pointer, if any
-    char* set_data(size_t n);  // Write data to the result
+    void set_data(const char* data, size_t n);  // Discards pointer, if any
+    char* set_data(size_t n);                   // Write data to the result
     // Returns nullptr if holding bytes
     absl::cord_internal::CordRep* tree() const;
-    // Discards old pointer, if any
-    void set_tree(absl::cord_internal::CordRep* rep);
-    // Replaces a tree with a new root. This is faster than set_tree, but it
-    // should only be used when it's clear that the old rep was a tree.
-    void replace_tree(absl::cord_internal::CordRep* rep);
+    absl::cord_internal::CordRep* as_tree() const;
+    const char* as_chars() const;
     // Returns non-null iff was holding a pointer
     absl::cord_internal::CordRep* clear();
     // Converts to pointer if necessary.
-    absl::cord_internal::CordRep* force_tree(size_t extra_hint);
-    void reduce_size(size_t n);  // REQUIRES: holding data
+    void reduce_size(size_t n);    // REQUIRES: holding data
     void remove_prefix(size_t n);  // REQUIRES: holding data
-    void AppendArray(const char* src_data, size_t src_size);
+    void AppendArray(absl::string_view src, MethodIdentifier method);
     absl::string_view FindFlatStartPiece() const;
-    void AppendTree(absl::cord_internal::CordRep* tree);
-    void PrependTree(absl::cord_internal::CordRep* tree);
-    void GetAppendRegion(char** region, size_t* size, size_t max_length);
-    void GetAppendRegion(char** region, size_t* size);
-    bool IsSame(const InlineRep& other) const {
-      return memcmp(data_, other.data_, sizeof(data_)) == 0;
-    }
-    int BitwiseCompare(const InlineRep& other) const {
-      uint64_t x, y;
-      // Use memcpy to avoid anti-aliasing issues.
-      memcpy(&x, data_, sizeof(x));
-      memcpy(&y, other.data_, sizeof(y));
-      if (x == y) {
-        memcpy(&x, data_ + 8, sizeof(x));
-        memcpy(&y, other.data_ + 8, sizeof(y));
-        if (x == y) return 0;
-      }
-      return absl::big_endian::FromHost64(x) < absl::big_endian::FromHost64(y)
-                 ? -1
-                 : 1;
-    }
+
+    // Creates a CordRepFlat instance from the current inlined data with `extra`
+    // bytes of desired additional capacity.
+    CordRepFlat* MakeFlatWithExtraCapacity(size_t extra);
+
+    // Sets the tree value for this instance. `rep` must not be null.
+    // Requires the current instance to hold a tree, and a lock to be held on
+    // any CordzInfo referenced by this instance. The latter is enforced through
+    // the CordzUpdateScope argument. If the current instance is sampled, then
+    // the CordzInfo instance is updated to reference the new `rep` value.
+    void SetTree(CordRep* rep, const CordzUpdateScope& scope);
+
+    // Identical to SetTree(), except that `rep` is allowed to be null, in
+    // which case the current instance is reset to an empty value.
+    void SetTreeOrEmpty(CordRep* rep, const CordzUpdateScope& scope);
+
+    // Sets the tree value for this instance, and randomly samples this cord.
+    // This function disregards existing contents in `data_`, and should be
+    // called when a Cord is 'promoted' from an 'uninitialized' or 'inlined'
+    // value to a non-inlined (tree / ring) value.
+    void EmplaceTree(CordRep* rep, MethodIdentifier method);
+
+    // Identical to EmplaceTree, except that it copies the parent stack from
+    // the provided `parent` data if the parent is sampled.
+    void EmplaceTree(CordRep* rep, const InlineData& parent,
+                     MethodIdentifier method);
+
+    // Commits the change of a newly created, or updated `rep` root value into
+    // this cord. `old_rep` indicates the old (inlined or tree) value of the
+    // cord, and determines if the commit invokes SetTree() or EmplaceTree().
+    void CommitTree(const CordRep* old_rep, CordRep* rep,
+                    const CordzUpdateScope& scope, MethodIdentifier method);
+
+    void AppendTreeToInlined(CordRep* tree, MethodIdentifier method);
+    void AppendTreeToTree(CordRep* tree, MethodIdentifier method);
+    void AppendTree(CordRep* tree, MethodIdentifier method);
+    void PrependTreeToInlined(CordRep* tree, MethodIdentifier method);
+    void PrependTreeToTree(CordRep* tree, MethodIdentifier method);
+    void PrependTree(CordRep* tree, MethodIdentifier method);
+
+    bool IsSame(const InlineRep& other) const { return data_ == other.data_; }
+
     void CopyTo(std::string* dst) const {
       // memcpy is much faster when operating on a known size. On most supported
       // platforms, the small string optimization is large enough that resizing
       // to 15 bytes does not cause a memory allocation.
-      absl::strings_internal::STLStringResizeUninitialized(dst,
-                                                           sizeof(data_) - 1);
-      memcpy(&(*dst)[0], data_, sizeof(data_) - 1);
+      absl::strings_internal::STLStringResizeUninitialized(dst, kMaxInline);
+      data_.copy_max_inline_to(&(*dst)[0]);
       // erase is faster than resize because the logic for memory allocation is
       // not needed.
-      dst->erase(data_[kMaxInline]);
+      dst->erase(inline_size());
     }
 
     // Copies the inline contents into `dst`. Assumes the cord is not empty.
     void CopyToArray(char* dst) const;
 
-    bool is_tree() const { return data_[kMaxInline] > kMaxInline; }
+    bool is_tree() const { return data_.is_tree(); }
+
+    // Returns true if the Cord is being profiled by cordz.
+    bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); }
+
+    // Returns the available inlined capacity, or 0 if is_tree() == true.
+    size_t remaining_inline_capacity() const {
+      return data_.is_tree() ? 0 : kMaxInline - data_.inline_size();
+    }
+
+    // Returns the profiled CordzInfo, or nullptr if not sampled.
+    absl::cord_internal::CordzInfo* cordz_info() const {
+      return data_.cordz_info();
+    }
+
+    // Sets the profiled CordzInfo. `cordz_info` must not be null.
+    void set_cordz_info(cord_internal::CordzInfo* cordz_info) {
+      assert(cordz_info != nullptr);
+      data_.set_cordz_info(cordz_info);
+    }
+
+    // Resets the current cordz_info to null / empty.
+    void clear_cordz_info() { data_.clear_cordz_info(); }
 
    private:
     friend class Cord;
 
     void AssignSlow(const InlineRep& src);
-    // Unrefs the tree, stops profiling, and zeroes the contents
-    void ClearSlow();
+    // Unrefs the tree and stops profiling.
+    void UnrefTree();
 
-    // If the data has length <= kMaxInline, we store it in data_[0..len-1],
-    // and store the length in data_[kMaxInline].  Else we store it in a tree
-    // and store a pointer to that tree in data_[0..sizeof(CordRep*)-1].
-    alignas(absl::cord_internal::CordRep*) char data_[kMaxInline + 1];
+    void ResetToEmpty() { data_ = {}; }
+
+    void set_inline_size(size_t size) { data_.set_inline_size(size); }
+    size_t inline_size() const { return data_.inline_size(); }
+
+    // Empty cords that carry a checksum have a CordRepCrc node with a null
+    // child node. Removing the CordRepCrc node before mutations lets the code
+    // avoid many special cases in the transition from tree back to inline
+    // storage. Must never be called inside a
+    // CordzUpdateScope since it untracks the cordz info.
+    void MaybeRemoveEmptyCrcNode();
+
+    cord_internal::InlineData data_;
   };
   InlineRep contents_;
 
-  // Helper for MemoryUsage().
-  static size_t MemoryUsageAux(const absl::cord_internal::CordRep* rep);
-
   // Helper for GetFlat() and TryFlat().
   static bool GetFlatAux(absl::cord_internal::CordRep* rep,
                          absl::string_view* fragment);
@@ -771,6 +995,24 @@
   template <typename C>
   void AppendImpl(C&& src);
 
+  // Appends / Prepends `src` to this instance, using precise sizing.
+  // This method explicitly does not attempt to use any spare capacity
+  // in the last added, privately owned flat.
+  // Requires `src` to be <= kMaxFlatLength.
+  void AppendPrecise(absl::string_view src, MethodIdentifier method);
+  void PrependPrecise(absl::string_view src, MethodIdentifier method);
+
+  CordBuffer GetAppendBufferSlowPath(size_t block_size, size_t capacity,
+                                     size_t min_capacity);
+
+  // Prepends the provided data to this instance. `method` contains the public
+  // API method for this action which is tracked for Cordz sampling purposes.
+  void PrependArray(absl::string_view src, MethodIdentifier method);
+
+  // Assigns the value in 'src' to this instance, 'stealing' its contents.
+  // Requires src.length() > kMaxBytesToCopy.
+  Cord& AssignLargeString(std::string&& src);
+
   // Helper for AbslHashValue().
   template <typename H>
   H HashFragmented(H hash_state) const {
@@ -781,6 +1023,10 @@
     });
     return H::combine(combiner.finalize(std::move(hash_state)), size());
   }
+
+  friend class CrcCord;
+  void SetCrcCordState(crc_internal::CrcCordState state);
+  const crc_internal::CrcCordState* MaybeGetCrcCordState() const;
 };
 
 ABSL_NAMESPACE_END
@@ -797,63 +1043,17 @@
 
 namespace cord_internal {
 
-// Fast implementation of memmove for up to 15 bytes. This implementation is
-// safe for overlapping regions. If nullify_tail is true, the destination is
-// padded with '\0' up to 16 bytes.
-inline void SmallMemmove(char* dst, const char* src, size_t n,
-                         bool nullify_tail = false) {
-  if (n >= 8) {
-    assert(n <= 16);
-    uint64_t buf1;
-    uint64_t buf2;
-    memcpy(&buf1, src, 8);
-    memcpy(&buf2, src + n - 8, 8);
-    if (nullify_tail) {
-      memset(dst + 8, 0, 8);
-    }
-    memcpy(dst, &buf1, 8);
-    memcpy(dst + n - 8, &buf2, 8);
-  } else if (n >= 4) {
-    uint32_t buf1;
-    uint32_t buf2;
-    memcpy(&buf1, src, 4);
-    memcpy(&buf2, src + n - 4, 4);
-    if (nullify_tail) {
-      memset(dst + 4, 0, 4);
-      memset(dst + 8, 0, 8);
-    }
-    memcpy(dst, &buf1, 4);
-    memcpy(dst + n - 4, &buf2, 4);
-  } else {
-    if (n != 0) {
-      dst[0] = src[0];
-      dst[n / 2] = src[n / 2];
-      dst[n - 1] = src[n - 1];
-    }
-    if (nullify_tail) {
-      memset(dst + 8, 0, 8);
-      memset(dst + n, 0, 8);
-    }
-  }
-}
-
 // Does non-template-specific `CordRepExternal` initialization.
-// Expects `data` to be non-empty.
+// Requires `data` to be non-empty.
 void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep);
 
 // Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
-// to it, or `nullptr` if `data` was empty.
+// to it. Requires `data` to be non-empty.
 template <typename Releaser>
 // NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
 CordRep* NewExternalRep(absl::string_view data, Releaser&& releaser) {
+  assert(!data.empty());
   using ReleaserType = absl::decay_t<Releaser>;
-  if (data.empty()) {
-    // Never create empty external nodes.
-    InvokeReleaser(Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
-                   data);
-    return nullptr;
-  }
-
   CordRepExternal* rep = new CordRepExternalImpl<ReleaserType>(
       std::forward<Releaser>(releaser), 0);
   InitializeCordRepExternal(data, rep);
@@ -873,18 +1073,34 @@
 template <typename Releaser>
 Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) {
   Cord cord;
-  cord.contents_.set_tree(::absl::cord_internal::NewExternalRep(
-      data, std::forward<Releaser>(releaser)));
+  if (ABSL_PREDICT_TRUE(!data.empty())) {
+    cord.contents_.EmplaceTree(::absl::cord_internal::NewExternalRep(
+                                   data, std::forward<Releaser>(releaser)),
+                               Cord::MethodIdentifier::kMakeCordFromExternal);
+  } else {
+    using ReleaserType = absl::decay_t<Releaser>;
+    cord_internal::InvokeReleaser(
+        cord_internal::Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
+        data);
+  }
   return cord;
 }
 
-inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src) {
-  cord_internal::SmallMemmove(data_, src.data_, sizeof(data_));
+constexpr Cord::InlineRep::InlineRep(absl::string_view sv, CordRep* rep)
+    : data_(sv, rep) {}
+
+inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src)
+    : data_(InlineData::kDefaultInit) {
+  if (CordRep* tree = src.tree()) {
+    EmplaceTree(CordRep::Ref(tree), src.data_,
+                CordzUpdateTracker::kConstructorCord);
+  } else {
+    data_ = src.data_;
+  }
 }
 
-inline Cord::InlineRep::InlineRep(Cord::InlineRep&& src) {
-  memcpy(data_, src.data_, sizeof(data_));
-  memset(src.data_, 0, sizeof(data_));
+inline Cord::InlineRep::InlineRep(Cord::InlineRep&& src) : data_(src.data_) {
+  src.ResetToEmpty();
 }
 
 inline Cord::InlineRep& Cord::InlineRep::operator=(const Cord::InlineRep& src) {
@@ -892,7 +1108,7 @@
     return *this;
   }
   if (!is_tree() && !src.is_tree()) {
-    cord_internal::SmallMemmove(data_, src.data_, sizeof(data_));
+    data_ = src.data_;
     return *this;
   }
   AssignSlow(src);
@@ -902,10 +1118,10 @@
 inline Cord::InlineRep& Cord::InlineRep::operator=(
     Cord::InlineRep&& src) noexcept {
   if (is_tree()) {
-    ClearSlow();
+    UnrefTree();
   }
-  memcpy(data_, src.data_, sizeof(data_));
-  memset(src.data_, 0, sizeof(data_));
+  data_ = src.data_;
+  src.ResetToEmpty();
   return *this;
 }
 
@@ -913,82 +1129,147 @@
   if (rhs == this) {
     return;
   }
-
-  Cord::InlineRep tmp;
-  cord_internal::SmallMemmove(tmp.data_, data_, sizeof(data_));
-  cord_internal::SmallMemmove(data_, rhs->data_, sizeof(data_));
-  cord_internal::SmallMemmove(rhs->data_, tmp.data_, sizeof(data_));
+  std::swap(data_, rhs->data_);
 }
 
 inline const char* Cord::InlineRep::data() const {
-  return is_tree() ? nullptr : data_;
+  return is_tree() ? nullptr : data_.as_chars();
+}
+
+inline const char* Cord::InlineRep::as_chars() const {
+  assert(!data_.is_tree());
+  return data_.as_chars();
+}
+
+inline absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const {
+  assert(data_.is_tree());
+  return data_.as_tree();
 }
 
 inline absl::cord_internal::CordRep* Cord::InlineRep::tree() const {
   if (is_tree()) {
-    absl::cord_internal::CordRep* rep;
-    memcpy(&rep, data_, sizeof(rep));
-    return rep;
+    return as_tree();
   } else {
     return nullptr;
   }
 }
 
-inline bool Cord::InlineRep::empty() const { return data_[kMaxInline] == 0; }
+inline bool Cord::InlineRep::empty() const { return data_.is_empty(); }
 
 inline size_t Cord::InlineRep::size() const {
-  const char tag = data_[kMaxInline];
-  if (tag <= kMaxInline) return tag;
-  return static_cast<size_t>(tree()->length);
+  return is_tree() ? as_tree()->length : inline_size();
 }
 
-inline void Cord::InlineRep::set_tree(absl::cord_internal::CordRep* rep) {
-  if (rep == nullptr) {
-    memset(data_, 0, sizeof(data_));
+inline cord_internal::CordRepFlat* Cord::InlineRep::MakeFlatWithExtraCapacity(
+    size_t extra) {
+  static_assert(cord_internal::kMinFlatLength >= sizeof(data_), "");
+  size_t len = data_.inline_size();
+  auto* result = CordRepFlat::New(len + extra);
+  result->length = len;
+  data_.copy_max_inline_to(result->Data());
+  return result;
+}
+
+inline void Cord::InlineRep::EmplaceTree(CordRep* rep,
+                                         MethodIdentifier method) {
+  assert(rep);
+  data_.make_tree(rep);
+  CordzInfo::MaybeTrackCord(data_, method);
+}
+
+inline void Cord::InlineRep::EmplaceTree(CordRep* rep, const InlineData& parent,
+                                         MethodIdentifier method) {
+  data_.make_tree(rep);
+  CordzInfo::MaybeTrackCord(data_, parent, method);
+}
+
+inline void Cord::InlineRep::SetTree(CordRep* rep,
+                                     const CordzUpdateScope& scope) {
+  assert(rep);
+  assert(data_.is_tree());
+  data_.set_tree(rep);
+  scope.SetCordRep(rep);
+}
+
+inline void Cord::InlineRep::SetTreeOrEmpty(CordRep* rep,
+                                            const CordzUpdateScope& scope) {
+  assert(data_.is_tree());
+  if (rep) {
+    data_.set_tree(rep);
   } else {
-    bool was_tree = is_tree();
-    memcpy(data_, &rep, sizeof(rep));
-    memset(data_ + sizeof(rep), 0, sizeof(data_) - sizeof(rep) - 1);
-    if (!was_tree) {
-      data_[kMaxInline] = kTreeFlag;
-    }
+    data_ = {};
   }
+  scope.SetCordRep(rep);
 }
 
-inline void Cord::InlineRep::replace_tree(absl::cord_internal::CordRep* rep) {
-  ABSL_ASSERT(is_tree());
-  if (ABSL_PREDICT_FALSE(rep == nullptr)) {
-    set_tree(rep);
-    return;
+inline void Cord::InlineRep::CommitTree(const CordRep* old_rep, CordRep* rep,
+                                        const CordzUpdateScope& scope,
+                                        MethodIdentifier method) {
+  if (old_rep) {
+    SetTree(rep, scope);
+  } else {
+    EmplaceTree(rep, method);
   }
-  memcpy(data_, &rep, sizeof(rep));
-  memset(data_ + sizeof(rep), 0, sizeof(data_) - sizeof(rep) - 1);
 }
 
 inline absl::cord_internal::CordRep* Cord::InlineRep::clear() {
-  const char tag = data_[kMaxInline];
-  absl::cord_internal::CordRep* result = nullptr;
-  if (tag > kMaxInline) {
-    memcpy(&result, data_, sizeof(result));
+  if (is_tree()) {
+    CordzInfo::MaybeUntrackCord(cordz_info());
   }
-  memset(data_, 0, sizeof(data_));  // Clear the cord
+  absl::cord_internal::CordRep* result = tree();
+  ResetToEmpty();
   return result;
 }
 
 inline void Cord::InlineRep::CopyToArray(char* dst) const {
   assert(!is_tree());
-  size_t n = data_[kMaxInline];
+  size_t n = inline_size();
   assert(n != 0);
-  cord_internal::SmallMemmove(dst, data_, n);
+  cord_internal::SmallMemmove(dst, data_.as_chars(), n);
+}
+
+inline void Cord::InlineRep::MaybeRemoveEmptyCrcNode() {
+  CordRep* rep = tree();
+  if (rep == nullptr || ABSL_PREDICT_TRUE(rep->length > 0)) {
+    return;
+  }
+  assert(rep->IsCrc());
+  assert(rep->crc()->child == nullptr);
+  CordzInfo::MaybeUntrackCord(cordz_info());
+  CordRep::Unref(rep);
+  ResetToEmpty();
 }
 
 constexpr inline Cord::Cord() noexcept {}
 
+inline Cord::Cord(absl::string_view src)
+    : Cord(src, CordzUpdateTracker::kConstructorString) {}
+
+template <typename T>
+constexpr Cord::Cord(strings_internal::StringConstant<T>)
+    : contents_(strings_internal::StringConstant<T>::value,
+                strings_internal::StringConstant<T>::value.size() <=
+                        cord_internal::kMaxInline
+                    ? nullptr
+                    : &cord_internal::ConstInitExternalStorage<
+                          strings_internal::StringConstant<T>>::value) {}
+
 inline Cord& Cord::operator=(const Cord& x) {
   contents_ = x.contents_;
   return *this;
 }
 
+template <typename T, Cord::EnableIfString<T>>
+Cord& Cord::operator=(T&& src) {
+  if (src.size() <= cord_internal::kMaxBytesToCopy) {
+    return operator=(absl::string_view(src));
+  } else {
+    return AssignLargeString(std::forward<T>(src));
+  }
+}
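
A small illustration of the assignment paths above (the copy-versus-steal threshold kMaxBytesToCopy is an internal detail; the sizes are only illustrative):

    #include <string>
    #include <utility>
    #include "absl/strings/cord.h"

    void AssignStrings(absl::Cord& cord) {
      std::string small = "tiny";
      cord = small;             // small payloads are copied (string_view path)
      std::string large(1 << 20, 'x');
      cord = std::move(large);  // large strings may be adopted rather than copied
    }
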
+
+inline Cord::Cord(const Cord& src) : contents_(src.contents_) {}
+
 inline Cord::Cord(Cord&& src) noexcept : contents_(std::move(src.contents_)) {}
 
 inline void Cord::swap(Cord& other) noexcept {
@@ -1001,19 +1282,29 @@
 }
 
 extern template Cord::Cord(std::string&& src);
-extern template Cord& Cord::operator=(std::string&& src);
 
 inline size_t Cord::size() const {
   // Length is 1st field in str.rep_
   return contents_.size();
 }
 
-inline bool Cord::empty() const { return contents_.empty(); }
+inline bool Cord::empty() const { return size() == 0; }
 
-inline size_t Cord::EstimatedMemoryUsage() const {
+inline size_t Cord::EstimatedMemoryUsage(
+    CordMemoryAccounting accounting_method) const {
   size_t result = sizeof(Cord);
   if (const absl::cord_internal::CordRep* rep = contents_.tree()) {
-    result += MemoryUsageAux(rep);
+    switch (accounting_method) {
+      case CordMemoryAccounting::kFairShare:
+        result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
+        break;
+      case CordMemoryAccounting::kTotalMorePrecise:
+        result += cord_internal::GetMorePreciseMemoryUsage(rep);
+        break;
+      case CordMemoryAccounting::kTotal:
+        result += cord_internal::GetEstimatedMemoryUsage(rep);
+        break;
+    }
   }
   return result;
 }
@@ -1044,7 +1335,46 @@
 }
 
 inline void Cord::Append(absl::string_view src) {
-  contents_.AppendArray(src.data(), src.size());
+  contents_.AppendArray(src, CordzUpdateTracker::kAppendString);
+}
+
+inline void Cord::Prepend(absl::string_view src) {
+  PrependArray(src, CordzUpdateTracker::kPrependString);
+}
+
+inline void Cord::Append(CordBuffer buffer) {
+  if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+  absl::string_view short_value;
+  if (CordRep* rep = buffer.ConsumeValue(short_value)) {
+    contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer);
+  } else {
+    AppendPrecise(short_value, CordzUpdateTracker::kAppendCordBuffer);
+  }
+}
+
+inline void Cord::Prepend(CordBuffer buffer) {
+  if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+  absl::string_view short_value;
+  if (CordRep* rep = buffer.ConsumeValue(short_value)) {
+    contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer);
+  } else {
+    PrependPrecise(short_value, CordzUpdateTracker::kPrependCordBuffer);
+  }
+}
+
+inline CordBuffer Cord::GetAppendBuffer(size_t capacity, size_t min_capacity) {
+  if (empty()) return CordBuffer::CreateWithDefaultLimit(capacity);
+  return GetAppendBufferSlowPath(0, capacity, min_capacity);
+}
+
+inline CordBuffer Cord::GetCustomAppendBuffer(size_t block_size,
+                                              size_t capacity,
+                                              size_t min_capacity) {
+  if (empty()) {
+    return block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity)
+                      : CordBuffer::CreateWithDefaultLimit(capacity);
+  }
+  return GetAppendBufferSlowPath(block_size, capacity, min_capacity);
 }
 
 extern template void Cord::Append(std::string&& src);
@@ -1052,7 +1382,7 @@
 
 inline int Cord::Compare(const Cord& rhs) const {
   if (!contents_.is_tree() && !rhs.contents_.is_tree()) {
-    return contents_.BitwiseCompare(rhs.contents_);
+    return contents_.data_.Compare(rhs.contents_.data_);
   }
 
   return CompareImpl(rhs);
@@ -1072,17 +1402,71 @@
   return EqualsImpl(rhs, rhs_size);
 }
 
-inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
-    : bytes_remaining_(cord->size()) {
-  if (cord->empty()) return;
-  if (cord->contents_.is_tree()) {
-    stack_of_right_children_.push_back(cord->contents_.tree());
-    operator++();
+inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
+  tree = cord_internal::SkipCrcNode(tree);
+  if (tree->tag == cord_internal::BTREE) {
+    current_chunk_ = btree_reader_.Init(tree->btree());
   } else {
-    current_chunk_ = absl::string_view(cord->contents_.data(), cord->size());
+    current_leaf_ = tree;
+    current_chunk_ = cord_internal::EdgeData(tree);
   }
 }
 
+inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree) {
+  bytes_remaining_ = tree->length;
+  InitTree(tree);
+}
+
+inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) {
+  if (CordRep* tree = cord->contents_.tree()) {
+    bytes_remaining_ = tree->length;
+    if (ABSL_PREDICT_TRUE(bytes_remaining_ != 0)) {
+      InitTree(tree);
+    } else {
+      current_chunk_ = {};
+    }
+  } else {
+    bytes_remaining_ = cord->contents_.inline_size();
+    current_chunk_ = {cord->contents_.data(), bytes_remaining_};
+  }
+}
+
+inline Cord::ChunkIterator& Cord::ChunkIterator::AdvanceBtree() {
+  current_chunk_ = btree_reader_.Next();
+  return *this;
+}
+
+inline void Cord::ChunkIterator::AdvanceBytesBtree(size_t n) {
+  assert(n >= current_chunk_.size());
+  bytes_remaining_ -= n;
+  if (bytes_remaining_) {
+    if (n == current_chunk_.size()) {
+      current_chunk_ = btree_reader_.Next();
+    } else {
+      size_t offset = btree_reader_.length() - bytes_remaining_;
+      current_chunk_ = btree_reader_.Seek(offset);
+    }
+  } else {
+    current_chunk_ = {};
+  }
+}
+
+inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
+  ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
+                        "Attempted to iterate past `end()`");
+  assert(bytes_remaining_ >= current_chunk_.size());
+  bytes_remaining_ -= current_chunk_.size();
+  if (bytes_remaining_ > 0) {
+    if (btree_reader_) {
+      return AdvanceBtree();
+    } else {
+      assert(!current_chunk_.empty());  // Called on invalid iterator.
+    }
+    current_chunk_ = {};
+  }
+  return *this;
+}
+
 inline Cord::ChunkIterator Cord::ChunkIterator::operator++(int) {
   ChunkIterator tmp(*this);
   operator++();
@@ -1114,10 +1498,15 @@
 }
 
 inline void Cord::ChunkIterator::AdvanceBytes(size_t n) {
+  assert(bytes_remaining_ >= n);
   if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) {
     RemoveChunkPrefix(n);
   } else if (n != 0) {
-    AdvanceBytesSlowPath(n);
+    if (btree_reader_) {
+      AdvanceBytesBtree(n);
+    } else {
+      bytes_remaining_ = 0;
+    }
   }
 }
 
@@ -1204,11 +1593,11 @@
   if (rep == nullptr) {
     callback(absl::string_view(contents_.data(), contents_.size()));
   } else {
-    return ForEachChunkAux(rep, callback);
+    ForEachChunkAux(rep, callback);
   }
 }
 
-// Nonmember Cord-to-Cord relational operarators.
+// Nonmember Cord-to-Cord relational operators.
 inline bool operator==(const Cord& lhs, const Cord& rhs) {
   if (lhs.contents_.IsSame(rhs.contents_)) return true;
   size_t rhs_size = rhs.size();
@@ -1217,12 +1606,8 @@
 }
 
 inline bool operator!=(const Cord& x, const Cord& y) { return !(x == y); }
-inline bool operator<(const Cord& x, const Cord& y) {
-  return x.Compare(y) < 0;
-}
-inline bool operator>(const Cord& x, const Cord& y) {
-  return x.Compare(y) > 0;
-}
+inline bool operator<(const Cord& x, const Cord& y) { return x.Compare(y) < 0; }
+inline bool operator>(const Cord& x, const Cord& y) { return x.Compare(y) > 0; }
 inline bool operator<=(const Cord& x, const Cord& y) {
   return x.Compare(y) <= 0;
 }
@@ -1263,7 +1648,6 @@
  public:
   static size_t FlatOverhead();
   static size_t MaxFlatLength();
-  static size_t SizeofCordRepConcat();
   static size_t SizeofCordRepExternal();
   static size_t SizeofCordRepSubstring();
   static size_t FlatTagToLength(uint8_t tag);
diff --git a/abseil-cpp/absl/strings/cord_analysis.cc b/abseil-cpp/absl/strings/cord_analysis.cc
new file mode 100644
index 0000000..e859b0d
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_analysis.cc
@@ -0,0 +1,210 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/cord_analysis.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <unordered_set>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+//
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/functional/function_ref.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+// Accounting mode for analyzing memory usage.
+enum class Mode { kFairShare, kTotal, kTotalMorePrecise };
+
+// CordRepRef holds a `const CordRep*` reference in `rep` and, depending on the
+// mode, a 'fraction' representing a cumulative inverse refcount weight.
+template <Mode mode>
+struct CordRepRef {
+  // Instantiates a CordRepRef instance.
+  explicit CordRepRef(const CordRep* r) : rep(r) {}
+
+  // Creates a child reference holding the provided child.
+  // Overloaded to add cumulative reference count for kFairShare.
+  CordRepRef Child(const CordRep* child) const { return CordRepRef(child); }
+
+  const CordRep* rep;
+};
+
+// RawUsage holds the computed total number of bytes.
+template <Mode mode>
+struct RawUsage {
+  size_t total = 0;
+
+  // Add 'size' to total, ignoring the CordRepRef argument.
+  void Add(size_t size, CordRepRef<mode>) { total += size; }
+};
+
+// Overloaded representation of RawUsage that tracks the set of objects
+// counted, and avoids double-counting objects referenced more than once
+// by the same Cord.
+template <>
+struct RawUsage<Mode::kTotalMorePrecise> {
+  size_t total = 0;
+  // TODO(b/289250880): Replace this with a flat_hash_set.
+  std::unordered_set<const CordRep*> counted;
+
+  void Add(size_t size, CordRepRef<Mode::kTotalMorePrecise> repref) {
+    if (counted.find(repref.rep) == counted.end()) {
+      counted.insert(repref.rep);
+      total += size;
+    }
+  }
+};
+
+// Returns n / refcount avoiding a div for the common refcount == 1.
+template <typename refcount_t>
+double MaybeDiv(double d, refcount_t refcount) {
+  return refcount == 1 ? d : d / refcount;
+}
+
+// Overloaded 'kFairShare' specialization for CordRepRef. This class holds a
+// `fraction` value which represents a cumulative inverse refcount weight.
+// For example, a top node with a reference count of 2 will have a fraction
+// value of 1/2 = 0.5, representing the 'fair share' of memory it references.
+// A node below such a node with a reference count of 5 then has a fraction of
+// 0.5 / 5 = 0.1 representing the fair share of memory below that node, etc.
+template <>
+struct CordRepRef<Mode::kFairShare> {
+  // Creates a CordRepRef with the provided rep and top (parent) fraction.
+  explicit CordRepRef(const CordRep* r, double frac = 1.0)
+      : rep(r), fraction(MaybeDiv(frac, r->refcount.Get())) {}
+
+  // Returns a CordRepRef with a fraction of `this->fraction / child.refcount`
+  CordRepRef Child(const CordRep* child) const {
+    return CordRepRef(child, fraction);
+  }
+
+  const CordRep* rep;
+  double fraction;
+};
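
For intuition, a hand-worked example of these fractions (not taken from the source): a top node with refcount 2 gets fraction 1/2; a child edge with refcount 5 beneath it gets (1/2) / 5 = 0.1, so a 100-byte flat reached through that edge contributes 100 * 0.1 = 10 bytes to the kFairShare total.
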
+
+// Overloaded 'kFairShare' specialization for RawUsage
+template <>
+struct RawUsage<Mode::kFairShare> {
+  double total = 0;
+
+  // Adds `size` multiplied by `rep.fraction` to the total size.
+  void Add(size_t size, CordRepRef<Mode::kFairShare> rep) {
+    total += static_cast<double>(size) * rep.fraction;
+  }
+};
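+
+// Illustrative example (not part of the implementation): for a data edge that
+// is a flat with an allocated size of 100 bytes and a refcount of 1, owned by
+// a top-level node whose refcount is 2 (i.e., the tree is shared by two
+// Cords), the kFairShare specializations above combine as:
+//
+//   CordRepRef<Mode::kFairShare> node(rep);  // fraction = 1.0 / 2 = 0.5
+//   CordRepRef<Mode::kFairShare> edge = node.Child(flat);  // 0.5 / 1 = 0.5
+//   raw_usage.Add(100, edge);                // total += 100 * 0.5 = 50
+//
+// so each sharing Cord is charged 50 of the flat's 100 bytes as its fair
+// share.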
+
+// Computes the estimated memory size of the provided data edge.
+// External reps are assumed 'heap allocated at their exact size'.
+template <Mode mode>
+void AnalyzeDataEdge(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+  assert(IsDataEdge(rep.rep));
+
+  // Consume all substrings
+  if (rep.rep->tag == SUBSTRING) {
+    raw_usage.Add(sizeof(CordRepSubstring), rep);
+    rep = rep.Child(rep.rep->substring()->child);
+  }
+
+  // Consume FLAT / EXTERNAL
+  const size_t size =
+      rep.rep->tag >= FLAT
+          ? rep.rep->flat()->AllocatedSize()
+          : rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
+  raw_usage.Add(size, rep);
+}
+
+// Computes the memory size of the provided Ring tree.
+template <Mode mode>
+void AnalyzeRing(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+  const CordRepRing* ring = rep.rep->ring();
+  raw_usage.Add(CordRepRing::AllocSize(ring->capacity()), rep);
+  ring->ForEach([&](CordRepRing::index_type pos) {
+    AnalyzeDataEdge(rep.Child(ring->entry_child(pos)), raw_usage);
+  });
+}
+
+// Computes the memory size of the provided Btree tree.
+template <Mode mode>
+void AnalyzeBtree(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+  raw_usage.Add(sizeof(CordRepBtree), rep);
+  const CordRepBtree* tree = rep.rep->btree();
+  if (tree->height() > 0) {
+    for (CordRep* edge : tree->Edges()) {
+      AnalyzeBtree(rep.Child(edge), raw_usage);
+    }
+  } else {
+    for (CordRep* edge : tree->Edges()) {
+      AnalyzeDataEdge(rep.Child(edge), raw_usage);
+    }
+  }
+}
+
+template <Mode mode>
+size_t GetEstimatedUsage(const CordRep* rep) {
+  // Zero initialized memory usage totals.
+  RawUsage<mode> raw_usage;
+
+  // Capture top level node and refcount into a CordRepRef.
+  CordRepRef<mode> repref(rep);
+
+  // Consume the top level CRC node if present.
+  if (repref.rep->tag == CRC) {
+    raw_usage.Add(sizeof(CordRepCrc), repref);
+    repref = repref.Child(repref.rep->crc()->child);
+  }
+
+  if (IsDataEdge(repref.rep)) {
+    AnalyzeDataEdge(repref, raw_usage);
+  } else if (repref.rep->tag == BTREE) {
+    AnalyzeBtree(repref, raw_usage);
+  } else if (repref.rep->tag == RING) {
+    AnalyzeRing(repref, raw_usage);
+  } else {
+    assert(false);
+  }
+
+  return static_cast<size_t>(raw_usage.total);
+}
+
+}  // namespace
+
+size_t GetEstimatedMemoryUsage(const CordRep* rep) {
+  return GetEstimatedUsage<Mode::kTotal>(rep);
+}
+
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep) {
+  return GetEstimatedUsage<Mode::kFairShare>(rep);
+}
+
+size_t GetMorePreciseMemoryUsage(const CordRep* rep) {
+  return GetEstimatedUsage<Mode::kTotalMorePrecise>(rep);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/cord_analysis.h b/abseil-cpp/absl/strings/cord_analysis.h
new file mode 100644
index 0000000..9b9527a
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_analysis.h
@@ -0,0 +1,62 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORD_ANALYSIS_H_
+#define ABSL_STRINGS_CORD_ANALYSIS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// Cord (which may not remain the same between invocations). Cords that share
+// memory could each be "charged" independently for the same shared memory.
+size_t GetEstimatedMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// Cord for the distinct memory held by this cord. This is similar to
+// `GetEstimatedMemoryUsage()`, except that if the cord has multiple references
+// to the same memory, that memory is only counted once.
+//
+// For example:
+//   absl::Cord cord;
+//   cord.Append(some_other_cord);
+//   cord.Append(some_other_cord);
+//   // Calls GetEstimatedMemoryUsage() and counts `some_other_cord` twice:
+//   cord.EstimatedMemoryUsage(kTotal);
+//   // Calls GetMorePreciseMemoryUsage() and counts `some_other_cord` once:
+//   cord.EstimatedMemoryUsage(kTotalMorePrecise);
+//
+// This is more expensive than `GetEstimatedMemoryUsage()` as it requires
+// deduplicating all memory references.
+size_t GetMorePreciseMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// CordRep weighted by the sharing ratio of that data. For example, if some data
+// edge is shared by 4 different Cords, then each Cord is attributed 1/4th of
+// the total memory usage of that edge as its 'fair share'.
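+//
+// Illustrative example (not part of the API contract): if a 1000-byte tree is
+// shared by two Cords (a top-level refcount of 2), `GetEstimatedMemoryUsage()`
+// reports roughly 1000 bytes for each Cord, while
+// `GetEstimatedFairShareMemoryUsage()` reports roughly 500 bytes for each, so
+// the fair-share charges of all Cords sharing the data sum to the actual
+// footprint.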
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep);
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+
+#endif  // ABSL_STRINGS_CORD_ANALYSIS_H_
diff --git a/abseil-cpp/absl/strings/cord_buffer.cc b/abseil-cpp/absl/strings/cord_buffer.cc
new file mode 100644
index 0000000..fad6269
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_buffer.cc
@@ -0,0 +1,30 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/cord_buffer.h"
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordBuffer::kDefaultLimit;
+constexpr size_t CordBuffer::kCustomLimit;
+#endif
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/cord_buffer.h b/abseil-cpp/absl/strings/cord_buffer.h
new file mode 100644
index 0000000..bc0e4e4
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_buffer.h
@@ -0,0 +1,572 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cord_buffer.h
+// -----------------------------------------------------------------------------
+//
+// This file defines an `absl::CordBuffer` data structure to hold data for
+// eventual inclusion within an existing `Cord` data structure. Cord buffers are
+// useful for building large Cords that may require custom allocation of its
+// associated memory.
+//
+#ifndef ABSL_STRINGS_CORD_BUFFER_H_
+#define ABSL_STRINGS_CORD_BUFFER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/numeric/bits.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class Cord;
+class CordBufferTestPeer;
+
+// CordBuffer
+//
+// CordBuffer manages memory buffers for purposes such as zero-copy APIs as well
+// as applications building cords with large data requiring granular control
+// over the allocation and size of cord data. For example, a function creating
+// a cord of random data could use a CordBuffer as follows:
+//
+//   absl::Cord CreateRandomCord(size_t length) {
+//     absl::Cord cord;
+//     while (length > 0) {
+//       CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(length);
+//       absl::Span<char> data = buffer.available_up_to(length);
+//       FillRandomValues(data.data(), data.size());
+//       buffer.IncreaseLengthBy(data.size());
+//       cord.Append(std::move(buffer));
+//       length -= data.size();
+//     }
+//     return cord;
+//   }
+//
+// CordBuffer instances are by default limited to a capacity of `kDefaultLimit`
+// bytes. `kDefaultLimit` is currently just under 4KiB, but this default may
+// change in the future and/or for specific architectures. The default limit
+// aims to provide a good trade-off between performance and memory overhead:
+// smaller buffers typically incur more compute cost, while larger buffers are
+// more CPU efficient but create significant memory overhead because such
+// allocations are less granular. Using larger buffers may also increase the
+// risk of memory fragmentation.
+//
+// Applications create a buffer using one of the `CreateWithDefaultLimit()` or
+// `CreateWithCustomLimit()` methods. The returned instance will have a non-zero
+// capacity and a zero length. Applications use the `data()` method to set the
+// contents of the managed memory, and once done filling the buffer, use the
+// `IncreaseLengthBy()` or `SetLength()` method to specify the length of the
+// initialized data before adding the buffer to a Cord.
+//
+// The `CreateWithCustomLimit()` method is intended for applications needing
+// larger buffers than the default memory limit, allowing the allocation of up
+// to a capacity of `kCustomLimit` bytes minus some minimum internal overhead.
+// The usage of `CreateWithCustomLimit()` should be limited to only those use
+// cases where the distribution of the input is relatively well known, and/or
+// where the efficiency gains outweigh the risk of memory
+// fragmentation. See the documentation for `CreateWithCustomLimit()` for more
+// information on using larger custom limits.
+//
+// The capacity of a `CordBuffer` returned by one of the `Create` methods may
+// be larger than the requested capacity due to rounding, alignment and
+// granularity of the memory allocator. Applications should use the `capacity`
+// method to obtain the effective capacity of the returned instance as
+// demonstrated in the provided example above.
+//
+// CordBuffer is a move-only class. All references into the managed memory are
+// invalidated when an instance is moved into either another CordBuffer instance
+// or a Cord. Writing to a location obtained by a previous call to `data()`
+// after an instance was moved will lead to undefined behavior.
+//
+// A `moved from` CordBuffer instance will have a valid, but empty state.
+// CordBuffer is thread compatible.
+class CordBuffer {
+ public:
+  // kDefaultLimit
+  //
+  // Default capacity limits of allocated CordBuffers.
+  // See the class comments for more information on allocation limits.
+  static constexpr size_t kDefaultLimit = cord_internal::kMaxFlatLength;
+
+  // kCustomLimit
+  //
+  // Maximum size for CreateWithCustomLimit() allocated buffers.
+  // Note that the effective capacity may be slightly less
+  // because of the internal overhead of cord buffers.
+  static constexpr size_t kCustomLimit = 64U << 10;
+
+  // Constructors, Destructors and Assignment Operators
+
+  // Creates an empty CordBuffer.
+  CordBuffer() = default;
+
+  // Destroys this CordBuffer instance and, if not empty, releases any memory
+  // managed by this instance, invalidating previously returned references.
+  ~CordBuffer();
+
+  // CordBuffer is move-only
+  CordBuffer(CordBuffer&& rhs) noexcept;
+  CordBuffer& operator=(CordBuffer&&) noexcept;
+  CordBuffer(const CordBuffer&) = delete;
+  CordBuffer& operator=(const CordBuffer&) = delete;
+
+  // CordBuffer::MaximumPayload()
+  //
+  // Returns the guaranteed maximum payload for a CordBuffer returned by the
+  // `CreateWithDefaultLimit()` method. Each internal buffer inside a Cord
+  // incurs a small overhead to manage the length, type and reference count
+  // for the buffer managed inside the cord tree. Applications can use this
+  // method to get the approximate number of buffers required for a given byte
+  // size, etc.
+  //
+  // For example:
+  //   const size_t payload = absl::CordBuffer::MaximumPayload();
+  //   const size_t buffer_count = (total_size + payload - 1) / payload;
+  //   buffers.reserve(buffer_count);
+  static constexpr size_t MaximumPayload();
+
+  // Overload of the above `MaximumPayload()` that returns the
+  // maximum payload for a CordBuffer returned by the `CreateWithCustomLimit()`
+  // method given the provided `block_size`.
+  static constexpr size_t MaximumPayload(size_t block_size);
+
+  // CordBuffer::CreateWithDefaultLimit()
+  //
+  // Creates a CordBuffer instance of the desired `capacity`, capped at the
+  // default limit `kDefaultLimit`. The returned buffer has a guaranteed
+  // capacity of at least `min(kDefaultLimit, capacity)`. See the class comments
+  // for more information on buffer capacities and intended usage.
+  static CordBuffer CreateWithDefaultLimit(size_t capacity);
+
+  // CordBuffer::CreateWithCustomLimit()
+  //
+  // Creates a CordBuffer instance of the desired `capacity` rounded to an
+  // appropriate power of 2 size less than, or equal to `block_size`.
+  // Requires `block_size` to be a power of 2.
+  //
+  // If `capacity` is less than or equal to `kDefaultLimit`, then this method
+  // behaves identically to `CreateWithDefaultLimit`, which means that the caller
+  // is guaranteed to get a buffer of at least the requested capacity.
+  //
+  // If `capacity` is greater than or equal to `block_size`, then this method
+  // returns a buffer with an `allocated size` of `block_size` bytes. Otherwise,
+  // this method returns a buffer with a suitably smaller power of 2 block size
+  // to satisfy the request. The actual size depends on a number of factors, and
+  // is typically (but not necessarily) the highest or second highest power of 2
+  // value less than or equal to `capacity`.
+  //
+  // The 'allocated size' includes a small amount of overhead required for
+  // internal state, which is currently 13 bytes on 64-bit platforms. For
+  // example: a buffer created with `block_size` and `capacity` set to 8KiB
+  // will have an allocated size of 8KiB, and an effective internal `capacity`
+  // of 8KiB - 13 = 8179 bytes.
+  //
+  // To demonstrate this in practice, let's assume we want to read data from
+  // somewhat larger files using approximately 64KiB buffers:
+  //
+  //   absl::Cord ReadFromFile(int fd, size_t n) {
+  //     absl::Cord cord;
+  //     while (n > 0) {
+  //       CordBuffer buffer = CordBuffer::CreateWithCustomLimit(64 << 10, n);
+  //       absl::Span<char> data = buffer.available_up_to(n);
+  //       ReadFileDataOrDie(fd, data.data(), data.size());
+  //       buffer.IncreaseLengthBy(data.size());
+  //       cord.Append(std::move(buffer));
+  //       n -= data.size();
+  //     }
+  //     return cord;
+  //   }
+  //
+  // If we used this function to read a file of 659KiB, we might get the
+  // following pattern of allocated cord buffer sizes:
+  //
+  //   CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523)
+  //   CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523)
+  //   ...
+  //   CreateWithCustomLimit(64KiB,  19586) --> ~16KiB (16371)
+  //   CreateWithCustomLimit(64KiB,   3215) -->   3215 (at least 3215)
+  //
+  // The reason the method returns a 16K buffer instead of a roughly 19K buffer
+  // is to reduce memory overhead and fragmentation risks. Using carefully
+  // chosen power of 2 values reduces the entropy of allocated memory sizes.
+  //
+  // Additionally, let's assume we'd use the above function on files that are
+  // generally smaller than 64K. If we'd use 'precise' sized buffers for such
+  // files, than we'd get a very wide distribution of allocated memory sizes
+  // rounded to 4K page sizes, and we'd end up with a lot of unused capacity.
+  //
+  // In general, applications should only use custom sizes if the data they
+  // are consuming or storing is expected to be many times the chosen block
+  // size, and the choice is based on objective data and performance metrics.
+  // For example, a compression function may work faster and consume less CPU
+  // when using larger buffers. Such an application should pick a size offering
+  // a reasonable trade-off between expected data size, compute savings with
+  // larger buffers, and the cost or fragmentation effect of larger buffers.
+  // Applications must pick a reasonable spot on that curve, and make sure their
+  // data meets their expectations in size distributions such as "mostly large".
+  static CordBuffer CreateWithCustomLimit(size_t block_size, size_t capacity);
+
+  // CordBuffer::available()
+  //
+  // Returns the span delineating the available capacity in this buffer
+  // which is defined as `{ data() + length(), capacity() - length() }`.
+  absl::Span<char> available();
+
+  // CordBuffer::available_up_to()
+  //
+  // Returns the span delineating the available capacity in this buffer limited
+  // to `size` bytes. This is equivalent to `available().subspan(0, size)`.
+  absl::Span<char> available_up_to(size_t size);
+
+  // CordBuffer::data()
+  //
+  // Returns a non-null pointer to the data managed by this instance.
+  // Applications are allowed to write up to `capacity` bytes of instance data.
+  // CordBuffer data is uninitialized by default. Reading data from an instance
+  // that has not yet been initialized will lead to undefined behavior.
+  char* data();
+  const char* data() const;
+
+  // CordBuffer::length()
+  //
+  // Returns the length of this instance. The default length of a CordBuffer is
+  // 0, indicating an 'empty' CordBuffer. Applications must specify the length
+  // of the data in a CordBuffer before adding it to a Cord.
+  size_t length() const;
+
+  // CordBuffer::capacity()
+  //
+  // Returns the capacity of this instance. All instances have a non-zero
+  // capacity: default and `moved from` instances have a small internal buffer.
+  size_t capacity() const;
+
+  // CordBuffer::IncreaseLengthBy()
+  //
+  // Increases the length of this buffer by the specified `n` bytes.
+  // Applications must make sure all data in this buffer up to the new length
+  // has been initialized before adding a CordBuffer to a Cord: failure to do so
+  // will lead to undefined behavior.  Requires `length() + n <= capacity()`.
+  // Typically, applications will use `available_up_to()` to get a span of the
+  // desired capacity, and use `span.size()` to increase the length as in:
+  //   absl::Span<char> span = buffer.available_up_to(desired);
+  //   buffer.IncreaseLengthBy(span.size());
+  //   memcpy(span.data(), src, span.size());
+  //   etc...
+  void IncreaseLengthBy(size_t n);
+
+  // CordBuffer::SetLength()
+  //
+  // Sets the data length of this instance. Applications must make sure all data
+  // of the specified length has been initialized before adding a CordBuffer to
+  // a Cord: failure to do so will lead to undefined behavior.
+  // Setting the length to a small value or zero does not release any memory
+  // held by this CordBuffer instance. Requires `length <= capacity()`.
+  // Applications should preferably use the `IncreaseLengthBy()` method above
+  // in combination with the `available()` or `available_up_to()` methods.
+  void SetLength(size_t length);
+
+ private:
+  // Make sure we don't accidentally over promise.
+  static_assert(kCustomLimit <= cord_internal::kMaxLargeFlatSize, "");
+
+  // Compare the cost of an 'uprounded' allocation to CeilPow2(size) against
+  // the cost of allocating at least 1 extra flat <= 4KB:
+  // - Flat overhead = 13 bytes
+  // - Btree amortized cost / node =~ 13 bytes
+  // - 64 byte granularity of tcmalloc at 4K =~ 32 byte average
+  // CPU cost and efficiency require that we at least 'save' something by
+  // splitting. As a poor man's measure, we say the slop needs to be at least
+  // double that cost offset (2 * (13 + 13 + 32) =~ 116) to make splitting
+  // worthwhile: ~128 bytes.
+  static constexpr size_t kMaxPageSlop = 128;
+
+  // Overhead for allocating a flat.
+  static constexpr size_t kOverhead = cord_internal::kFlatOverhead;
+
+  using CordRepFlat = cord_internal::CordRepFlat;
+
+  // `Rep` is the internal data representation of a CordBuffer. The internal
+  // representation has an internal small size optimization similar to
+  // std::string (SSO).
+  struct Rep {
+    // Inline SSO size of a CordBuffer
+    static constexpr size_t kInlineCapacity = sizeof(intptr_t) * 2 - 1;
+
+    // Creates a default instance with kInlineCapacity.
+    Rep() : short_rep{} {}
+
+    // Creates an instance managing an allocated, non-null CordRep.
+    explicit Rep(cord_internal::CordRepFlat* rep) : long_rep{rep} {
+      assert(rep != nullptr);
+    }
+
+    // Returns true if this instance manages the SSO internal buffer.
+    bool is_short() const {
+      constexpr size_t offset = offsetof(Short, raw_size);
+      return (reinterpret_cast<const char*>(this)[offset] & 1) != 0;
+    }
+
+    // Returns the available area of the internal SSO data
+    absl::Span<char> short_available() {
+      const size_t length = short_length();
+      return absl::Span<char>(short_rep.data + length,
+                              kInlineCapacity - length);
+    }
+
+    // Returns the available area of the internal SSO data
+    absl::Span<char> long_available() const {
+      assert(!is_short());
+      const size_t length = long_rep.rep->length;
+      return absl::Span<char>(long_rep.rep->Data() + length,
+                              long_rep.rep->Capacity() - length);
+    }
+
+    // Returns the length of the internal SSO data.
+    size_t short_length() const {
+      assert(is_short());
+      return static_cast<size_t>(short_rep.raw_size >> 1);
+    }
+
+    // Sets the length of the internal SSO data.
+    // Disregards any previously set CordRep instance.
+    void set_short_length(size_t length) {
+      short_rep.raw_size = static_cast<char>((length << 1) + 1);
+    }
+
+    // Adds `n` to the current short length.
+    void add_short_length(size_t n) {
+      assert(is_short());
+      short_rep.raw_size += static_cast<char>(n << 1);
+    }
+
+    // Returns reference to the internal SSO data buffer.
+    char* data() {
+      assert(is_short());
+      return short_rep.data;
+    }
+    const char* data() const {
+      assert(is_short());
+      return short_rep.data;
+    }
+
+    // Returns a pointer to the external CordRep managed by this instance.
+    cord_internal::CordRepFlat* rep() const {
+      assert(!is_short());
+      return long_rep.rep;
+    }
+
+    // The internal representation takes advantage of the fact that allocated
+    // memory is always on an even address, and uses the least significant bit
+    // of the first or last byte (depending on endianness) as the inline size
+    // indicator overlapping with the least significant byte of the CordRep*.
+#if defined(ABSL_IS_BIG_ENDIAN)
+    struct Long {
+      explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
+      void* padding;
+      cord_internal::CordRepFlat* rep;
+    };
+    struct Short {
+      char data[sizeof(Long) - 1];
+      char raw_size = 1;
+    };
+#else
+    struct Long {
+      explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
+      cord_internal::CordRepFlat* rep;
+      void* padding;
+    };
+    struct Short {
+      char raw_size = 1;
+      char data[sizeof(Long) - 1];
+    };
+#endif
+
+    union {
+      Long long_rep;
+      Short short_rep;
+    };
+  };
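+
+  // Illustrative sketch (not part of the implementation) of the tag-bit
+  // encoding used by `Rep`: `set_short_length(n)` stores `(n << 1) + 1` in
+  // `raw_size`, so the least significant bit of that byte is 1 for the SSO
+  // form, while a heap-allocated `CordRepFlat*` stored in `long_rep.rep` is at
+  // least 2-byte aligned, leaving a 0 in the overlapping bit inspected by
+  // `is_short()`:
+  //
+  //   Rep r;
+  //   r.set_short_length(5);  // raw_size == (5 << 1) + 1 == 11 (odd)
+  //   assert(r.is_short() && r.short_length() == 5);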
+
+  // Power2 functions
+  static bool IsPow2(size_t size) { return absl::has_single_bit(size); }
+  static size_t Log2Floor(size_t size) {
+    return static_cast<size_t>(absl::bit_width(size) - 1);
+  }
+  static size_t Log2Ceil(size_t size) {
+    return static_cast<size_t>(absl::bit_width(size - 1));
+  }
+
+  // Implementation of `CreateWithCustomLimit()`.
+  // This implementation allows for future memory allocation hints to
+  // be passed down into the CordRepFlat allocation function.
+  template <typename... AllocationHints>
+  static CordBuffer CreateWithCustomLimitImpl(size_t block_size,
+                                              size_t capacity,
+                                              AllocationHints... hints);
+
+  // Consumes the value contained in this instance and resets the instance.
+  // This method returns a non-null CordRep* if the current instance manages a
+  // CordRep*, and resets the instance to an empty SSO instance. If the current
+  // instance is an SSO instance, then this method returns nullptr and sets
+  // `short_value` to the inlined data value. In either case, the current
+  // instance length is reset to zero.
+  // This method is intended to be used by Cord internal functions only.
+  cord_internal::CordRep* ConsumeValue(absl::string_view& short_value) {
+    cord_internal::CordRep* rep = nullptr;
+    if (rep_.is_short()) {
+      short_value = absl::string_view(rep_.data(), rep_.short_length());
+    } else {
+      rep = rep_.rep();
+    }
+    rep_.set_short_length(0);
+    return rep;
+  }
+
+  // Internal constructor.
+  explicit CordBuffer(cord_internal::CordRepFlat* rep) : rep_(rep) {
+    assert(rep != nullptr);
+  }
+
+  Rep rep_;
+
+  friend class Cord;
+  friend class CordBufferTestPeer;
+};
+
+inline constexpr size_t CordBuffer::MaximumPayload() {
+  return cord_internal::kMaxFlatLength;
+}
+
+inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
+  return (std::min)(kCustomLimit, block_size) - cord_internal::kFlatOverhead;
+}
+
+inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
+  if (capacity > Rep::kInlineCapacity) {
+    auto* rep = cord_internal::CordRepFlat::New(capacity);
+    rep->length = 0;
+    return CordBuffer(rep);
+  }
+  return CordBuffer();
+}
+
+template <typename... AllocationHints>
+inline CordBuffer CordBuffer::CreateWithCustomLimitImpl(
+    size_t block_size, size_t capacity, AllocationHints... hints) {
+  assert(IsPow2(block_size));
+  capacity = (std::min)(capacity, kCustomLimit);
+  block_size = (std::min)(block_size, kCustomLimit);
+  if (capacity + kOverhead >= block_size) {
+    capacity = block_size;
+  } else if (capacity <= kDefaultLimit) {
+    capacity = capacity + kOverhead;
+  } else if (!IsPow2(capacity)) {
+    // Check if rounded up to next power 2 is a good enough fit
+    // with limited waste making it an acceptable direct fit.
+    const size_t rounded_up = size_t{1} << Log2Ceil(capacity);
+    const size_t slop = rounded_up - capacity;
+    if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) {
+      capacity = rounded_up;
+    } else {
+      // Round down to highest power of 2 <= capacity.
+      // Consider a more aggressive step down if that may reduce the
+      // risk of fragmentation where 'people are holding it wrong'.
+      const size_t rounded_down = size_t{1} << Log2Floor(capacity);
+      capacity = rounded_down;
+    }
+  }
+  const size_t length = capacity - kOverhead;
+  auto* rep = CordRepFlat::New(CordRepFlat::Large(), length, hints...);
+  rep->length = 0;
+  return CordBuffer(rep);
+}
+
+inline CordBuffer CordBuffer::CreateWithCustomLimit(size_t block_size,
+                                                    size_t capacity) {
+  return CreateWithCustomLimitImpl(block_size, capacity);
+}
+
+inline CordBuffer::~CordBuffer() {
+  if (!rep_.is_short()) {
+    cord_internal::CordRepFlat::Delete(rep_.rep());
+  }
+}
+
+inline CordBuffer::CordBuffer(CordBuffer&& rhs) noexcept : rep_(rhs.rep_) {
+  rhs.rep_.set_short_length(0);
+}
+
+inline CordBuffer& CordBuffer::operator=(CordBuffer&& rhs) noexcept {
+  if (!rep_.is_short()) cord_internal::CordRepFlat::Delete(rep_.rep());
+  rep_ = rhs.rep_;
+  rhs.rep_.set_short_length(0);
+  return *this;
+}
+
+inline absl::Span<char> CordBuffer::available() {
+  return rep_.is_short() ? rep_.short_available() : rep_.long_available();
+}
+
+inline absl::Span<char> CordBuffer::available_up_to(size_t size) {
+  return available().subspan(0, size);
+}
+
+inline char* CordBuffer::data() {
+  return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
+}
+
+inline const char* CordBuffer::data() const {
+  return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
+}
+
+inline size_t CordBuffer::capacity() const {
+  return rep_.is_short() ? Rep::kInlineCapacity : rep_.rep()->Capacity();
+}
+
+inline size_t CordBuffer::length() const {
+  return rep_.is_short() ? rep_.short_length() : rep_.rep()->length;
+}
+
+inline void CordBuffer::SetLength(size_t length) {
+  ABSL_HARDENING_ASSERT(length <= capacity());
+  if (rep_.is_short()) {
+    rep_.set_short_length(length);
+  } else {
+    rep_.rep()->length = length;
+  }
+}
+
+inline void CordBuffer::IncreaseLengthBy(size_t n) {
+  ABSL_HARDENING_ASSERT(n <= capacity() && length() + n <= capacity());
+  if (rep_.is_short()) {
+    rep_.add_short_length(n);
+  } else {
+    rep_.rep()->length += n;
+  }
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_CORD_BUFFER_H_
diff --git a/abseil-cpp/absl/strings/cord_buffer_test.cc b/abseil-cpp/absl/strings/cord_buffer_test.cc
new file mode 100644
index 0000000..5c7437a
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_buffer_test.cc
@@ -0,0 +1,320 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/cord_buffer.h"
+
+
+#include <algorithm>
+#include <climits>
+#include <cstring>
+#include <string>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+#include "absl/types/span.h"
+
+using testing::Eq;
+using testing::Ge;
+using testing::Le;
+using testing::Ne;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class CordBufferTestPeer {
+ public:
+  static cord_internal::CordRep* ConsumeValue(CordBuffer& buffer,
+                                              absl::string_view& short_value) {
+    return buffer.ConsumeValue(short_value);
+  }
+};
+
+namespace {
+
+using ::absl::cordrep_testing::CordToString;
+
+constexpr size_t kInlinedSize = sizeof(CordBuffer) - 1;
+constexpr size_t kDefaultLimit = CordBuffer::kDefaultLimit;
+constexpr size_t kCustomLimit = CordBuffer::kCustomLimit;
+constexpr size_t kMaxFlatSize = cord_internal::kMaxFlatSize;
+constexpr size_t kMaxFlatLength = cord_internal::kMaxFlatLength;
+constexpr size_t kFlatOverhead = cord_internal::kFlatOverhead;
+
+constexpr size_t k8KiB = 8 << 10;
+constexpr size_t k16KiB = 16 << 10;
+constexpr size_t k64KiB = 64 << 10;
+constexpr size_t k1MB = 1 << 20;
+
+class CordBufferTest : public testing::TestWithParam<size_t> {};
+
+INSTANTIATE_TEST_SUITE_P(MediumSize, CordBufferTest,
+                         testing::Values(1, kInlinedSize - 1, kInlinedSize,
+                                         kInlinedSize + 1, kDefaultLimit - 1,
+                                         kDefaultLimit));
+
+TEST_P(CordBufferTest, MaximumPayload) {
+  EXPECT_THAT(CordBuffer::MaximumPayload(), Eq(kMaxFlatLength));
+  EXPECT_THAT(CordBuffer::MaximumPayload(512), Eq(512 - kFlatOverhead));
+  EXPECT_THAT(CordBuffer::MaximumPayload(k64KiB), Eq(k64KiB - kFlatOverhead));
+  EXPECT_THAT(CordBuffer::MaximumPayload(k1MB), Eq(k64KiB - kFlatOverhead));
+}
+
+TEST(CordBufferTest, ConstructDefault) {
+  CordBuffer buffer;
+  EXPECT_THAT(buffer.capacity(), Eq(sizeof(CordBuffer) - 1));
+  EXPECT_THAT(buffer.length(), Eq(0));
+  EXPECT_THAT(buffer.data(), Ne(nullptr));
+  EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
+  EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
+  memset(buffer.data(), 0xCD, buffer.capacity());
+}
+
+TEST(CordBufferTest, CreateSsoWithDefaultLimit) {
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(3);
+  EXPECT_THAT(buffer.capacity(), Ge(3));
+  EXPECT_THAT(buffer.capacity(), Le(sizeof(CordBuffer)));
+  EXPECT_THAT(buffer.length(), Eq(0));
+  memset(buffer.data(), 0xCD, buffer.capacity());
+
+  memcpy(buffer.data(), "Abc", 3);
+  buffer.SetLength(3);
+  EXPECT_THAT(buffer.length(), Eq(3));
+  absl::string_view short_value;
+  EXPECT_THAT(CordBufferTestPeer::ConsumeValue(buffer, short_value),
+              Eq(nullptr));
+  EXPECT_THAT(absl::string_view(buffer.data(), 3), Eq("Abc"));
+  EXPECT_THAT(short_value, Eq("Abc"));
+}
+
+TEST_P(CordBufferTest, Available) {
+  const size_t requested = GetParam();
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+  EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
+  EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
+
+  buffer.SetLength(2);
+  EXPECT_THAT(buffer.available().data(), Eq(buffer.data() + 2));
+  EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity() - 2));
+}
+
+TEST_P(CordBufferTest, IncreaseLengthBy) {
+  const size_t requested = GetParam();
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+  buffer.IncreaseLengthBy(2);
+  EXPECT_THAT(buffer.length(), Eq(2));
+  buffer.IncreaseLengthBy(5);
+  EXPECT_THAT(buffer.length(), Eq(7));
+}
+
+TEST_P(CordBufferTest, AvailableUpTo) {
+  const size_t requested = GetParam();
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+  size_t expected_up_to = std::min<size_t>(3, buffer.capacity());
+  EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data()));
+  EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
+
+  buffer.SetLength(2);
+  expected_up_to = std::min<size_t>(3, buffer.capacity() - 2);
+  EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data() + 2));
+  EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
+}
+
+// Returns the maximum capacity for a given block_size and requested size.
+size_t MaxCapacityFor(size_t block_size, size_t requested) {
+  requested = (std::min)(requested, cord_internal::kMaxLargeFlatSize);
+  // Maximum returned size is always capped at block_size - kFlatOverhead.
+  return block_size - kFlatOverhead;
+}
+
+TEST_P(CordBufferTest, CreateWithDefaultLimit) {
+  const size_t requested = GetParam();
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+  EXPECT_THAT(buffer.capacity(), Ge(requested));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
+  EXPECT_THAT(buffer.length(), Eq(0));
+
+  memset(buffer.data(), 0xCD, buffer.capacity());
+
+  std::string data(requested - 1, 'x');
+  memcpy(buffer.data(), data.c_str(), requested);
+  buffer.SetLength(requested);
+
+  EXPECT_THAT(buffer.length(), Eq(requested));
+  EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
+}
+
+TEST(CordBufferTest, CreateWithDefaultLimitAskingFor2GB) {
+  constexpr size_t k2GiB = 1U << 31;
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(k2GiB);
+  // Expect to never be awarded more than a reasonable memory size, even in
+  // cases where a (debug) memory allocator may grant us somewhat more memory
+  // than `kDefaultLimit` which should be no more than `2 * kDefaultLimit`
+  EXPECT_THAT(buffer.capacity(), Le(2 * CordBuffer::kDefaultLimit));
+  EXPECT_THAT(buffer.length(), Eq(0));
+  EXPECT_THAT(buffer.data(), Ne(nullptr));
+  memset(buffer.data(), 0xCD, buffer.capacity());
+}
+
+TEST_P(CordBufferTest, MoveConstruct) {
+  const size_t requested = GetParam();
+  CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
+  const size_t capacity = from.capacity();
+  memcpy(from.data(), "Abc", 4);
+  from.SetLength(4);
+
+  CordBuffer to(std::move(from));
+  EXPECT_THAT(to.capacity(), Eq(capacity));
+  EXPECT_THAT(to.length(), Eq(4));
+  EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
+
+  EXPECT_THAT(from.length(), Eq(0));  // NOLINT
+}
+
+TEST_P(CordBufferTest, MoveAssign) {
+  const size_t requested = GetParam();
+  CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
+  const size_t capacity = from.capacity();
+  memcpy(from.data(), "Abc", 4);
+  from.SetLength(4);
+
+  CordBuffer to;
+  to = std::move(from);
+  EXPECT_THAT(to.capacity(), Eq(capacity));
+  EXPECT_THAT(to.length(), Eq(4));
+  EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
+
+  EXPECT_THAT(from.length(), Eq(0));  // NOLINT
+}
+
+TEST_P(CordBufferTest, ConsumeValue) {
+  const size_t requested = GetParam();
+  CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+  memcpy(buffer.data(), "Abc", 4);
+  buffer.SetLength(3);
+
+  absl::string_view short_value;
+  if (cord_internal::CordRep* rep =
+          CordBufferTestPeer::ConsumeValue(buffer, short_value)) {
+    EXPECT_THAT(CordToString(rep), Eq("Abc"));
+    cord_internal::CordRep::Unref(rep);
+  } else {
+    EXPECT_THAT(short_value, Eq("Abc"));
+  }
+  EXPECT_THAT(buffer.length(), Eq(0));
+}
+
+TEST_P(CordBufferTest, CreateWithCustomLimitWithinDefaultLimit) {
+  const size_t requested = GetParam();
+  CordBuffer buffer =
+      CordBuffer::CreateWithCustomLimit(kMaxFlatSize, requested);
+  EXPECT_THAT(buffer.capacity(), Ge(requested));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
+  EXPECT_THAT(buffer.length(), Eq(0));
+
+  memset(buffer.data(), 0xCD, buffer.capacity());
+
+  std::string data(requested - 1, 'x');
+  memcpy(buffer.data(), data.c_str(), requested);
+  buffer.SetLength(requested);
+
+  EXPECT_THAT(buffer.length(), Eq(requested));
+  EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
+}
+
+TEST(CordLargeBufferTest, CreateAtOrBelowDefaultLimit) {
+  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, kDefaultLimit);
+  EXPECT_THAT(buffer.capacity(), Ge(kDefaultLimit));
+  EXPECT_THAT(buffer.capacity(),
+              Le(MaxCapacityFor(kMaxFlatSize, kDefaultLimit)));
+
+  buffer = CordBuffer::CreateWithCustomLimit(k64KiB, 3178);
+  EXPECT_THAT(buffer.capacity(), Ge(3178));
+}
+
+TEST(CordLargeBufferTest, CreateWithCustomLimit) {
+  ASSERT_TRUE((kMaxFlatSize & (kMaxFlatSize - 1)) == 0)
+      << "Must be power of 2";
+
+  for (size_t size = kMaxFlatSize; size <= kCustomLimit; size *= 2) {
+    CordBuffer buffer = CordBuffer::CreateWithCustomLimit(size, size);
+    size_t expected = size - kFlatOverhead;
+    ASSERT_THAT(buffer.capacity(), Ge(expected));
+    EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(size, expected)));
+  }
+}
+
+TEST(CordLargeBufferTest, CreateWithTooLargeLimit) {
+  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, k1MB);
+  ASSERT_THAT(buffer.capacity(), Ge(k64KiB - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k64KiB, k1MB)));
+}
+
+TEST(CordLargeBufferTest, CreateWithHugeValueForOverFlowHardening) {
+  for (size_t dist_from_max = 0; dist_from_max <= 32; ++dist_from_max) {
+    size_t capacity = std::numeric_limits<size_t>::max() - dist_from_max;
+
+    CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(capacity);
+    ASSERT_THAT(buffer.capacity(), Ge(kDefaultLimit));
+    EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, capacity)));
+
+    for (size_t limit = kMaxFlatSize; limit <= kCustomLimit; limit *= 2) {
+      CordBuffer buffer = CordBuffer::CreateWithCustomLimit(limit, capacity);
+      ASSERT_THAT(buffer.capacity(), Ge(limit - kFlatOverhead));
+      EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(limit, capacity)));
+    }
+  }
+}
+
+TEST(CordLargeBufferTest, CreateWithSmallLimit) {
+  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(512, 1024);
+  ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 1024)));
+
+  // Ask for precise block size, should return size - kOverhead
+  buffer = CordBuffer::CreateWithCustomLimit(512, 512);
+  ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 512)));
+
+  // Corner case: 511 < block_size, but 511 + kOverhead is above
+  buffer = CordBuffer::CreateWithCustomLimit(512, 511);
+  ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 511)));
+
+  // Corner case: 498 + kOverhead < block_size
+  buffer = CordBuffer::CreateWithCustomLimit(512, 498);
+  ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 498)));
+}
+
+TEST(CordLargeBufferTest, CreateWasteFull) {
+  // 15 KiB gets rounded down to the next lower power of 2 value (8 KiB).
+  const size_t requested = (15 << 10);
+  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
+  ASSERT_THAT(buffer.capacity(), Ge(k8KiB - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k8KiB, requested)));
+}
+
+TEST(CordLargeBufferTest, CreateSmallSlop) {
+  const size_t requested = k16KiB - 2 * kFlatOverhead;
+  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
+  ASSERT_THAT(buffer.capacity(), Ge(k16KiB - kFlatOverhead));
+  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k16KiB, requested)));
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/cord_ring_reader_test.cc b/abseil-cpp/absl/strings/cord_ring_reader_test.cc
new file mode 100644
index 0000000..8e7183b
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_ring_reader_test.cc
@@ -0,0 +1,180 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdlib>
+#include <ctime>
+#include <memory>
+#include <random>
+#include <sstream>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/debugging/leak_check.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/internal/cord_rep_ring_reader.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using testing::Eq;
+
+// Creates a flat for testing
+CordRep* MakeFlat(absl::string_view s) {
+  CordRepFlat* flat = CordRepFlat::New(s.length());
+  memcpy(flat->Data(), s.data(), s.length());
+  flat->length = s.length();
+  return flat;
+}
+
+CordRepRing* FromFlats(Span<absl::string_view const> flats) {
+  CordRepRing* ring = CordRepRing::Create(MakeFlat(flats[0]), flats.size() - 1);
+  for (int i = 1; i < flats.size(); ++i) {
+    ring = CordRepRing::Append(ring, MakeFlat(flats[i]));
+  }
+  return ring;
+}
+
+std::array<absl::string_view, 12> TestFlats() {
+  return {"abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+          "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+          "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+}
+
+TEST(CordRingReaderTest, DefaultInstance) {
+  CordRepRingReader reader;
+  EXPECT_FALSE(static_cast<bool>(reader));
+  EXPECT_THAT(reader.ring(), Eq(nullptr));
+#ifndef NDEBUG
+  EXPECT_DEATH_IF_SUPPORTED(reader.length(), ".*");
+  EXPECT_DEATH_IF_SUPPORTED(reader.consumed(), ".*");
+  EXPECT_DEATH_IF_SUPPORTED(reader.remaining(), ".*");
+  EXPECT_DEATH_IF_SUPPORTED(reader.Next(), ".*");
+  EXPECT_DEATH_IF_SUPPORTED(reader.Seek(0), ".*");
+#endif
+}
+
+TEST(CordRingReaderTest, Reset) {
+  CordRepRingReader reader;
+  auto flats = TestFlats();
+  CordRepRing* ring = FromFlats(flats);
+
+  absl::string_view first = reader.Reset(ring);
+  EXPECT_THAT(first, Eq(flats[0]));
+  EXPECT_TRUE(static_cast<bool>(reader));
+  EXPECT_THAT(reader.ring(), Eq(ring));
+  EXPECT_THAT(reader.index(), Eq(ring->head()));
+  EXPECT_THAT(reader.node(), Eq(ring->entry_child(ring->head())));
+  EXPECT_THAT(reader.length(), Eq(ring->length));
+  EXPECT_THAT(reader.consumed(), Eq(flats[0].length()));
+  EXPECT_THAT(reader.remaining(), Eq(ring->length - reader.consumed()));
+
+  reader.Reset();
+  EXPECT_FALSE(static_cast<bool>(reader));
+  EXPECT_THAT(reader.ring(), Eq(nullptr));
+
+  CordRep::Unref(ring);
+}
+
+TEST(CordRingReaderTest, Next) {
+  CordRepRingReader reader;
+  auto flats = TestFlats();
+  CordRepRing* ring = FromFlats(flats);
+  CordRepRing::index_type head = ring->head();
+
+  reader.Reset(ring);
+  size_t consumed = reader.consumed();
+  size_t remaining = reader.remaining();
+  for (int i = 1; i < flats.size(); ++i) {
+    CordRepRing::index_type index = ring->advance(head, i);
+    consumed += flats[i].length();
+    remaining -= flats[i].length();
+    absl::string_view next = reader.Next();
+    ASSERT_THAT(next, Eq(flats[i]));
+    ASSERT_THAT(reader.index(), Eq(index));
+    ASSERT_THAT(reader.node(), Eq(ring->entry_child(index)));
+    ASSERT_THAT(reader.consumed(), Eq(consumed));
+    ASSERT_THAT(reader.remaining(), Eq(remaining));
+  }
+
+#ifndef NDEBUG
+  EXPECT_DEATH_IF_SUPPORTED(reader.Next(), ".*");
+#endif
+
+  CordRep::Unref(ring);
+}
+
+TEST(CordRingReaderTest, SeekForward) {
+  CordRepRingReader reader;
+  auto flats = TestFlats();
+  CordRepRing* ring = FromFlats(flats);
+  CordRepRing::index_type head = ring->head();
+
+  reader.Reset(ring);
+  size_t consumed = 0;
+  size_t remaining = ring->length;
+  for (int i = 0; i < flats.size(); ++i) {
+    CordRepRing::index_type index = ring->advance(head, i);
+    size_t offset = consumed;
+    consumed += flats[i].length();
+    remaining -= flats[i].length();
+    for (int off = 0; off < flats[i].length(); ++off) {
+      absl::string_view chunk = reader.Seek(offset + off);
+      ASSERT_THAT(chunk, Eq(flats[i].substr(off)));
+      ASSERT_THAT(reader.index(), Eq(index));
+      ASSERT_THAT(reader.node(), Eq(ring->entry_child(index)));
+      ASSERT_THAT(reader.consumed(), Eq(consumed));
+      ASSERT_THAT(reader.remaining(), Eq(remaining));
+    }
+  }
+
+  CordRep::Unref(ring);
+}
+
+TEST(CordRingReaderTest, SeekBackward) {
+  CordRepRingReader reader;
+  auto flats = TestFlats();
+  CordRepRing* ring = FromFlats(flats);
+  CordRepRing::index_type head = ring->head();
+
+  reader.Reset(ring);
+  size_t consumed = ring->length;
+  size_t remaining = 0;
+  for (int i = flats.size() - 1; i >= 0; --i) {
+    CordRepRing::index_type index = ring->advance(head, i);
+    size_t offset = consumed - flats[i].length();
+    for (int off = 0; off < flats[i].length(); ++off) {
+      absl::string_view chunk = reader.Seek(offset + off);
+      ASSERT_THAT(chunk, Eq(flats[i].substr(off)));
+      ASSERT_THAT(reader.index(), Eq(index));
+      ASSERT_THAT(reader.node(), Eq(ring->entry_child(index)));
+      ASSERT_THAT(reader.consumed(), Eq(consumed));
+      ASSERT_THAT(reader.remaining(), Eq(remaining));
+    }
+    consumed -= flats[i].length();
+    remaining += flats[i].length();
+  }
+#ifndef NDEBUG
+  EXPECT_DEATH_IF_SUPPORTED(reader.Seek(ring->length), ".*");
+#endif
+  CordRep::Unref(ring);
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/cord_ring_test.cc b/abseil-cpp/absl/strings/cord_ring_test.cc
new file mode 100644
index 0000000..f39a0a4
--- /dev/null
+++ b/abseil-cpp/absl/strings/cord_ring_test.cc
@@ -0,0 +1,1454 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdlib>
+#include <ctime>
+#include <memory>
+#include <random>
+#include <sstream>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/debugging/leak_check.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+extern thread_local bool cord_ring;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+using RandomEngine = std::mt19937_64;
+
+using ::absl::cord_internal::CordRep;
+using ::absl::cord_internal::CordRepConcat;
+using ::absl::cord_internal::CordRepExternal;
+using ::absl::cord_internal::CordRepFlat;
+using ::absl::cord_internal::CordRepRing;
+using ::absl::cord_internal::CordRepSubstring;
+
+using ::absl::cord_internal::EXTERNAL;
+using ::absl::cord_internal::SUBSTRING;
+
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Eq;
+using testing::Ge;
+using testing::Le;
+using testing::Lt;
+using testing::Ne;
+using testing::SizeIs;
+
+using index_type = CordRepRing::index_type;
+
+enum InputShareMode { kPrivate, kShared, kSharedIndirect };
+
+// TestParam class used by all test fixtures.
+// Not all fixtures use all possible input combinations.
+struct TestParam {
+  TestParam() = default;
+  explicit TestParam(InputShareMode input_share_mode)
+      : input_share_mode(input_share_mode) {}
+
+  // Run the test with the 'rep under test' to be privately owned.
+  // Otherwise, the rep has a shared ref count of 2 or higher.
+  bool refcount_is_one = true;
+
+  // Run the test with the 'rep under test' being allocated with enough capacity
+  // to accommodate any modifications made to it. Otherwise, the rep has zero
+  // extra (reserve) capacity.
+  bool with_capacity = true;
+
+  // For tests providing possibly shared input such as Append(.., CordRep*),
+  // this field defines whether that input is adopted with a refcount of one
+  // (privately owned / donated), or shared. For composite inputs such as
+  // 'substring of flat', we also have the 'shared indirect' value which means
+  // the top level node is not shared, but the contained child node is shared.
+  InputShareMode input_share_mode = kPrivate;
+
+  std::string ToString() const {
+    return absl::StrCat(refcount_is_one ? "Private" : "Shared",
+                        with_capacity ? "" : "_NoCapacity",
+                        (input_share_mode == kPrivate) ? ""
+                        : (input_share_mode == kShared)
+                            ? "_SharedInput"
+                            : "_IndirectSharedInput");
+  }
+};
+using TestParams = std::vector<TestParam>;
+
+// Matcher validating when mutable copies are required / performed.
+MATCHER_P2(EqIfPrivate, param, rep,
+           absl::StrCat("Equal 0x", absl::Hex(rep), " if private")) {
+  return param.refcount_is_one ? arg == rep : true;
+}
+
+// Matcher validating when mutable copies are required / performed.
+MATCHER_P2(EqIfPrivateAndCapacity, param, rep,
+           absl::StrCat("Equal 0x", absl::Hex(rep),
+                        " if private and capacity")) {
+  return (param.refcount_is_one && param.with_capacity) ? arg == rep : true;
+}
+
+// Matcher validating a shared ring was re-allocated. Should only be used for
+// tests doing exactly one update as subsequent updates could return the
+// original (freed and re-used) pointer.
+MATCHER_P2(NeIfShared, param, rep,
+           absl::StrCat("Not equal 0x", absl::Hex(rep), " if shared")) {
+  return param.refcount_is_one ? true : arg != rep;
+}
+
+MATCHER_P2(EqIfInputPrivate, param, rep, "Equal if input is private") {
+  return param.input_share_mode == kPrivate ? arg == rep : arg != rep;
+}
+
+// Matcher validating the core invariants of the CordRepRing instance.
+MATCHER(IsValidRingBuffer, "RingBuffer is valid") {
+  std::stringstream ss;
+  if (!arg->IsValid(ss)) {
+    *result_listener << "\nERROR: " << ss.str() << "\nRING = " << *arg;
+    return false;
+  }
+  return true;
+}
+
+// Returns the flats contained in the provided CordRepRing
+std::vector<string_view> ToFlats(const CordRepRing* r) {
+  std::vector<string_view> flats;
+  flats.reserve(r->entries());
+  index_type pos = r->head();
+  do {
+    flats.push_back(r->entry_data(pos));
+  } while ((pos = r->advance(pos)) != r->tail());
+  return flats;
+}
+
+class not_a_string_view {
+ public:
+  explicit not_a_string_view(absl::string_view s)
+      : data_(s.data()), size_(s.size()) {}
+  explicit not_a_string_view(const void* data, size_t size)
+      : data_(data), size_(size) {}
+
+  not_a_string_view remove_prefix(size_t n) const {
+    return not_a_string_view(static_cast<const char*>(data_) + n, size_ - n);
+  }
+
+  not_a_string_view remove_suffix(size_t n) const {
+    return not_a_string_view(data_, size_ - n);
+  }
+
+  const void* data() const { return data_; }
+  size_t size() const { return size_; }
+
+ private:
+  const void* data_;
+  size_t size_;
+};
+
+bool operator==(not_a_string_view lhs, not_a_string_view rhs) {
+  return lhs.data() == rhs.data() && lhs.size() == rhs.size();
+}
+
+std::ostream& operator<<(std::ostream& s, not_a_string_view rhs) {
+  return s << "{ data: " << rhs.data() << " size: " << rhs.size() << "}";
+}
+
+std::vector<not_a_string_view> ToRawFlats(const CordRepRing* r) {
+  std::vector<not_a_string_view> flats;
+  flats.reserve(r->entries());
+  index_type pos = r->head();
+  do {
+    flats.emplace_back(r->entry_data(pos));
+  } while ((pos = r->advance(pos)) != r->tail());
+  return flats;
+}
+
+// Returns the value contained in the provided CordRepRing
+std::string ToString(const CordRepRing* r) {
+  std::string value;
+  value.reserve(r->length);
+  index_type pos = r->head();
+  do {
+    absl::string_view sv = r->entry_data(pos);
+    value.append(sv.data(), sv.size());
+  } while ((pos = r->advance(pos)) != r->tail());
+  return value;
+}
+
+// Creates a flat for testing
+CordRep* MakeFlat(absl::string_view s, size_t extra = 0) {
+  CordRepFlat* flat = CordRepFlat::New(s.length() + extra);
+  memcpy(flat->Data(), s.data(), s.length());
+  flat->length = s.length();
+  return flat;
+}
+
+// Creates an external node for testing
+CordRepExternal* MakeExternal(absl::string_view s) {
+  struct Rep : public CordRepExternal {
+    std::string s;
+    explicit Rep(absl::string_view s) : s(s) {
+      this->tag = EXTERNAL;
+      this->base = s.data();
+      this->length = s.length();
+      this->releaser_invoker = [](CordRepExternal* self) {
+        delete static_cast<Rep*>(self);
+      };
+    }
+  };
+  return new Rep(s);
+}
+
+CordRepExternal* MakeFakeExternal(size_t length) {
+  struct Rep : public CordRepExternal {
+    std::string s;
+    explicit Rep(size_t len) {
+      this->tag = EXTERNAL;
+      this->base = reinterpret_cast<const char*>(this->storage);
+      this->length = len;
+      this->releaser_invoker = [](CordRepExternal* self) {
+        delete static_cast<Rep*>(self);
+      };
+    }
+  };
+  return new Rep(length);
+}
+
+// Creates a flat or an external node for testing depending on the size.
+CordRep* MakeLeaf(absl::string_view s, size_t extra = 0) {
+  if (s.size() <= absl::cord_internal::kMaxFlatLength) {
+    return MakeFlat(s, extra);
+  } else {
+    return MakeExternal(s);
+  }
+}
+
+// Creates a substring node
+CordRepSubstring* MakeSubstring(size_t start, size_t len, CordRep* rep) {
+  auto* sub = new CordRepSubstring;
+  sub->tag = SUBSTRING;
+  sub->start = start;
+  sub->length = (len <= 0) ? rep->length - start + len : len;
+  sub->child = rep;
+  return sub;
+}
+
+// Creates a substring node removing the specified prefix
+CordRepSubstring* RemovePrefix(size_t start, CordRep* rep) {
+  return MakeSubstring(start, rep->length - start, rep);
+}
+
+// Creates a substring node removing the specified suffix
+CordRepSubstring* RemoveSuffix(size_t length, CordRep* rep) {
+  return MakeSubstring(0, rep->length - length, rep);
+}
+
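+// Composition determines how FromFlats() below assembles a ring from a list
+// of flats: appending each flat, prepending each flat, or mixing appends and
+// prepends around the middle element.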
+enum Composition { kMix, kAppend, kPrepend };
+
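+// Picks a random composition seeded by the gtest random seed. kMix is chosen
+// with probability 1/2, kAppend and kPrepend with probability 1/4 each.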
+Composition RandomComposition() {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
+  return (rng() & 1) ? kMix : ((rng() & 1) ? kAppend : kPrepend);
+}
+
+absl::string_view ToString(Composition composition) {
+  switch (composition) {
+    case kAppend:
+      return "Append";
+    case kPrepend:
+      return "Prepend";
+    case kMix:
+      return "Mix";
+  }
+  assert(false);
+  return "???";
+}
+
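+// Test data: concatenating the kFoxFlats pieces (including their trailing
+// spaces) reproduces kFox exactly.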
+constexpr const char* kFox = "The quick brown fox jumps over the lazy dog";
+constexpr const char* kFoxFlats[] = {"The ", "quick ", "brown ",
+                                     "fox ", "jumps ", "over ",
+                                     "the ", "lazy ",  "dog"};
+
+CordRepRing* FromFlats(Span<const char* const> flats,
+                       Composition composition = kAppend) {
+  if (flats.empty()) return nullptr;
+  CordRepRing* ring = nullptr;
+  switch (composition) {
+    case kAppend:
+      ring = CordRepRing::Create(MakeLeaf(flats.front()), flats.size() - 1);
+      for (int i = 1; i < flats.size(); ++i) {
+        ring = CordRepRing::Append(ring, MakeLeaf(flats[i]));
+      }
+      break;
+    case kPrepend:
+      ring = CordRepRing::Create(MakeLeaf(flats.back()), flats.size() - 1);
+      for (int i = static_cast<int>(flats.size() - 2); i >= 0; --i) {
+        ring = CordRepRing::Prepend(ring, MakeLeaf(flats[i]));
+      }
+      break;
+    case kMix:
+      size_t middle1 = flats.size() / 2, middle2 = middle1;
+      ring = CordRepRing::Create(MakeLeaf(flats[middle1]), flats.size() - 1);
+      if (!flats.empty()) {
+        if ((flats.size() & 1) == 0) {
+          ring = CordRepRing::Prepend(ring, MakeLeaf(flats[--middle1]));
+        }
+        for (int i = 1; i <= middle1; ++i) {
+          ring = CordRepRing::Prepend(ring, MakeLeaf(flats[middle1 - i]));
+          ring = CordRepRing::Append(ring, MakeLeaf(flats[middle2 + i]));
+        }
+      }
+      break;
+  }
+  EXPECT_THAT(ToFlats(ring), ElementsAreArray(flats));
+  return ring;
+}
+
+std::ostream& operator<<(std::ostream& s, const TestParam& param) {
+  return s << param.ToString();
+}
+
+std::string TestParamToString(const testing::TestParamInfo<TestParam>& info) {
+  return info.param.ToString();
+}
+
+class CordRingTest : public testing::Test {
+ public:
+  ~CordRingTest() override {
+    for (CordRep* rep : unrefs_) {
+      CordRep::Unref(rep);
+    }
+  }
+
+  template <typename CordRepType>
+  CordRepType* NeedsUnref(CordRepType* rep) {
+    assert(rep);
+    unrefs_.push_back(rep);
+    return rep;
+  }
+
+  template <typename CordRepType>
+  CordRepType* Ref(CordRepType* rep) {
+    CordRep::Ref(rep);
+    return NeedsUnref(rep);
+  }
+
+ private:
+  std::vector<CordRep*> unrefs_;
+};
+
+class CordRingTestWithParam : public testing::TestWithParam<TestParam> {
+ public:
+  ~CordRingTestWithParam() override {
+    for (CordRep* rep : unrefs_) {
+      CordRep::Unref(rep);
+    }
+  }
+
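+  // Creates a ring from `child` with its capacity pinned to exactly
+  // `1 + extra_capacity` entries (or to a single entry if the test parameter
+  // disables extra capacity), ref'ing the ring if the test parameter models a
+  // shared instance.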
+  CordRepRing* CreateWithCapacity(CordRep* child, size_t extra_capacity) {
+    if (!GetParam().with_capacity) extra_capacity = 0;
+    CordRepRing* ring = CordRepRing::Create(child, extra_capacity);
+    ring->SetCapacityForTesting(1 + extra_capacity);
+    return RefIfShared(ring);
+  }
+
+  bool Shared() const { return !GetParam().refcount_is_one; }
+  bool InputShared() const { return GetParam().input_share_mode == kShared; }
+  bool InputSharedIndirect() const {
+    return GetParam().input_share_mode == kSharedIndirect;
+  }
+
+  template <typename CordRepType>
+  CordRepType* NeedsUnref(CordRepType* rep) {
+    assert(rep);
+    unrefs_.push_back(rep);
+    return rep;
+  }
+
+  template <typename CordRepType>
+  CordRepType* Ref(CordRepType* rep) {
+    CordRep::Ref(rep);
+    return NeedsUnref(rep);
+  }
+
+  template <typename CordRepType>
+  CordRepType* RefIfShared(CordRepType* rep) {
+    return Shared() ? Ref(rep) : rep;
+  }
+
+  template <typename CordRepType>
+  CordRepType* RefIfInputShared(CordRepType* rep) {
+    return InputShared() ? Ref(rep) : rep;
+  }
+
+  template <typename CordRepType>
+  CordRepType* RefIfInputSharedIndirect(CordRepType* rep) {
+    return InputSharedIndirect() ? Ref(rep) : rep;
+  }
+
+ private:
+  std::vector<CordRep*> unrefs_;
+};
+
+class CordRingCreateTest : public CordRingTestWithParam {
+ public:
+  static TestParams CreateTestParams() {
+    TestParams params;
+    params.emplace_back(InputShareMode::kPrivate);
+    params.emplace_back(InputShareMode::kShared);
+    return params;
+  }
+};
+
+class CordRingSubTest : public CordRingTestWithParam {
+ public:
+  static TestParams CreateTestParams() {
+    TestParams params;
+    for (bool refcount_is_one : {true, false}) {
+      TestParam param;
+      param.refcount_is_one = refcount_is_one;
+      params.push_back(param);
+    }
+    return params;
+  }
+};
+
+class CordRingBuildTest : public CordRingTestWithParam {
+ public:
+  static TestParams CreateTestParams() {
+    TestParams params;
+    for (bool refcount_is_one : {true, false}) {
+      for (bool with_capacity : {true, false}) {
+        TestParam param;
+        param.refcount_is_one = refcount_is_one;
+        param.with_capacity = with_capacity;
+        params.push_back(param);
+      }
+    }
+    return params;
+  }
+};
+
+class CordRingCreateFromTreeTest : public CordRingTestWithParam {
+ public:
+  static TestParams CreateTestParams() {
+    TestParams params;
+    params.emplace_back(InputShareMode::kPrivate);
+    params.emplace_back(InputShareMode::kShared);
+    params.emplace_back(InputShareMode::kSharedIndirect);
+    return params;
+  }
+};
+
+class CordRingBuildInputTest : public CordRingTestWithParam {
+ public:
+  static TestParams CreateTestParams() {
+    TestParams params;
+    for (bool refcount_is_one : {true, false}) {
+      for (bool with_capacity : {true, false}) {
+        for (InputShareMode share_mode : {kPrivate, kShared, kSharedIndirect}) {
+          TestParam param;
+          param.refcount_is_one = refcount_is_one;
+          param.with_capacity = with_capacity;
+          param.input_share_mode = share_mode;
+          params.push_back(param);
+        }
+      }
+    }
+    return params;
+  }
+};
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordRingSubTest,
+                         testing::ValuesIn(CordRingSubTest::CreateTestParams()),
+                         TestParamToString);
+
+INSTANTIATE_TEST_SUITE_P(
+    WithParam, CordRingCreateTest,
+    testing::ValuesIn(CordRingCreateTest::CreateTestParams()),
+    TestParamToString);
+
+INSTANTIATE_TEST_SUITE_P(
+    WithParam, CordRingCreateFromTreeTest,
+    testing::ValuesIn(CordRingCreateFromTreeTest::CreateTestParams()),
+    TestParamToString);
+
+INSTANTIATE_TEST_SUITE_P(
+    WithParam, CordRingBuildTest,
+    testing::ValuesIn(CordRingBuildTest::CreateTestParams()),
+    TestParamToString);
+
+INSTANTIATE_TEST_SUITE_P(
+    WithParam, CordRingBuildInputTest,
+    testing::ValuesIn(CordRingBuildInputTest::CreateTestParams()),
+    TestParamToString);
+
+TEST_P(CordRingCreateTest, CreateFromFlat) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(MakeFlat(str1)));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(str1.size()));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1));
+}
+
+TEST_P(CordRingCreateTest, CreateFromRing) {
+  CordRepRing* ring = RefIfShared(FromFlats(kFoxFlats));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(ring));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAreArray(kFoxFlats));
+}
+
+TEST_P(CordRingCreateFromTreeTest, CreateFromSubstringRing) {
+  CordRepRing* ring = RefIfInputSharedIndirect(FromFlats(kFoxFlats));
+  CordRep* sub = RefIfInputShared(MakeSubstring(2, 11, ring));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(sub));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfInputPrivate(GetParam(), ring));
+  EXPECT_THAT(ToString(result), string_view(kFox).substr(2, 11));
+}
+
+TEST_F(CordRingTest, CreateWithIllegalExtraCapacity) {
+#if defined(ABSL_HAVE_EXCEPTIONS)
+  CordRep* flat = NeedsUnref(MakeFlat("Hello world"));
+  try {
+    CordRepRing::Create(flat, CordRepRing::kMaxCapacity);
+    GTEST_FAIL() << "expected std::length_error exception";
+  } catch (const std::length_error&) {
+  }
+#elif defined(GTEST_HAS_DEATH_TEST)
+  CordRep* flat = NeedsUnref(MakeFlat("Hello world"));
+  EXPECT_DEATH(CordRepRing::Create(flat, CordRepRing::kMaxCapacity), ".*");
+#endif
+}
+
+TEST_P(CordRingCreateFromTreeTest, CreateFromSubstringOfFlat) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  auto* flat = RefIfInputShared(MakeFlat(str1));
+  auto* child = RefIfInputSharedIndirect(MakeSubstring(4, 20, flat));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(20));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1.substr(4, 20)));
+}
+
+TEST_P(CordRingCreateTest, CreateFromExternal) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  auto* child = RefIfInputShared(MakeExternal(str1));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(str1.size()));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1));
+}
+
+TEST_P(CordRingCreateFromTreeTest, CreateFromSubstringOfExternal) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  auto* external = RefIfInputShared(MakeExternal(str1));
+  auto* child = RefIfInputSharedIndirect(MakeSubstring(1, 24, external));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(24));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1.substr(1, 24)));
+}
+
+TEST_P(CordRingCreateFromTreeTest, CreateFromSubstringOfLargeExternal) {
+  auto* external = RefIfInputShared(MakeFakeExternal(1 << 20));
+  auto str = not_a_string_view(external->base, 1 << 20)
+                 .remove_prefix(1 << 19)
+                 .remove_suffix(6);
+  auto* child =
+      RefIfInputSharedIndirect(MakeSubstring(1 << 19, (1 << 19) - 6, external));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(str.size()));
+  EXPECT_THAT(ToRawFlats(result), ElementsAre(str));
+}
+
+TEST_P(CordRingCreateTest, Properties) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(MakeFlat(str1), 120));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->head(), Eq(0));
+  EXPECT_THAT(result->tail(), Eq(1));
+  EXPECT_THAT(result->capacity(), Ge(120 + 1));
+  EXPECT_THAT(result->capacity(), Le(2 * 120 + 1));
+  EXPECT_THAT(result->entries(), Eq(1));
+  EXPECT_THAT(result->begin_pos(), Eq(0));
+}
+
+TEST_P(CordRingCreateTest, EntryForNewFlat) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  CordRep* child = MakeFlat(str1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(child, 120));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->entry_child(0), Eq(child));
+  EXPECT_THAT(result->entry_end_pos(0), Eq(str1.length()));
+  EXPECT_THAT(result->entry_data_offset(0), Eq(0));
+}
+
+TEST_P(CordRingCreateTest, EntryForNewFlatSubstring) {
+  absl::string_view str1 = "1234567890abcdefghijklmnopqrstuvwxyz";
+  CordRep* child = MakeFlat(str1);
+  CordRep* substring = MakeSubstring(10, 26, child);
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(substring, 1));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->entry_child(0), Eq(child));
+  EXPECT_THAT(result->entry_end_pos(0), Eq(26));
+  EXPECT_THAT(result->entry_data_offset(0), Eq(10));
+}
+
+TEST_P(CordRingBuildTest, AppendFlat) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRepRing* ring = CreateWithCapacity(MakeExternal(str1), 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, MakeFlat(str2)));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size()));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1, str2));
+}
+
+TEST_P(CordRingBuildTest, PrependFlat) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRepRing* ring = CreateWithCapacity(MakeExternal(str1), 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, MakeFlat(str2)));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size()));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str2, str1));
+}
+
+TEST_P(CordRingBuildTest, AppendString) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRepRing* ring = CreateWithCapacity(MakeExternal(str1), 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, str2));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size()));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1, str2));
+}
+
+TEST_P(CordRingBuildTest, AppendStringHavingExtra) {
+  absl::string_view str1 = "1234";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRepRing* ring = CreateWithCapacity(MakeFlat(str1, 26), 0);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, str2));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size()));
+  EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+}
+
+TEST_P(CordRingBuildTest, AppendStringHavingPartialExtra) {
+  absl::string_view str1 = "1234";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+  // Create a flat with at least one extra byte of capacity. We don't expect
+  // sized allocation and capacity rounding to grant enough extra space for
+  // the appended string to fit entirely, so the append should be partial.
+  auto* flat = MakeFlat(str1, 1);
+  size_t avail = flat->flat()->Capacity() - flat->length;
+  ASSERT_THAT(avail, Lt(str2.size())) << " adjust test for larger flats!";
+
+  // Construct the flats we do expect using all of `avail`.
+  absl::string_view str1a = str2.substr(0, avail);
+  absl::string_view str2a = str2.substr(avail);
+
+  CordRepRing* ring = CreateWithCapacity(flat, 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, str2));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size()));
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  if (GetParam().refcount_is_one) {
+    EXPECT_THAT(ToFlats(result), ElementsAre(StrCat(str1, str1a), str2a));
+  } else {
+    EXPECT_THAT(ToFlats(result), ElementsAre(str1, str2));
+  }
+}
+
+TEST_P(CordRingBuildTest, AppendStringHavingExtraInSubstring) {
+  absl::string_view str1 = "123456789_1234";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRep* flat = RemovePrefix(10, MakeFlat(str1, 26));
+  CordRepRing* ring = CreateWithCapacity(flat, 0);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, str2));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(result->length, Eq(4 + str2.size()));
+  if (GetParam().refcount_is_one) {
+    EXPECT_THAT(ToFlats(result), ElementsAre(StrCat("1234", str2)));
+  } else {
+    EXPECT_THAT(ToFlats(result), ElementsAre("1234", str2));
+  }
+}
+
+TEST_P(CordRingBuildTest, AppendStringHavingSharedExtra) {
+  absl::string_view str1 = "123456789_1234";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  for (int shared_type = 0; shared_type < 2; ++shared_type) {
+    SCOPED_TRACE(absl::StrCat("Shared extra type ", shared_type));
+
+    // Create a flat that is shared in some way.
+    CordRep* flat = nullptr;
+    CordRep* flat1 = nullptr;
+    if (shared_type == 0) {
+      // Shared flat
+      flat = CordRep::Ref(MakeFlat(str1.substr(10), 100));
+    } else if (shared_type == 1) {
+      // Shared flat inside private substring
+      flat1 = CordRep::Ref(MakeFlat(str1));
+      flat = RemovePrefix(10, flat1);
+    } else {
+      // Private flat inside shared substring
+      flat = CordRep::Ref(RemovePrefix(10, MakeFlat(str1, 100)));
+    }
+
+    CordRepRing* ring = CreateWithCapacity(flat, 1);
+    CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, str2));
+    ASSERT_THAT(result, IsValidRingBuffer());
+    EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+    EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+    EXPECT_THAT(result->length, Eq(4 + str2.size()));
+    EXPECT_THAT(ToFlats(result), ElementsAre("1234", str2));
+
+    CordRep::Unref(shared_type == 1 ? flat1 : flat);
+  }
+}
+
+TEST_P(CordRingBuildTest, AppendStringWithExtra) {
+  absl::string_view str1 = "1234";
+  absl::string_view str2 = "1234567890";
+  absl::string_view str3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRepRing* ring = CreateWithCapacity(MakeExternal(str1), 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, str2, 26));
+  result = CordRepRing::Append(result, str3);
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size() + str3.size()));
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str1, StrCat(str2, str3)));
+}
+
+TEST_P(CordRingBuildTest, PrependString) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  // Use external rep to avoid appending to first flat
+  CordRepRing* ring = CreateWithCapacity(MakeExternal(str1), 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, str2));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  if (GetParam().with_capacity && GetParam().refcount_is_one) {
+    EXPECT_THAT(result, Eq(ring));
+  } else {
+    EXPECT_THAT(result, Ne(ring));
+  }
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size()));
+  EXPECT_THAT(ToFlats(result), ElementsAre(str2, str1));
+}
+
+TEST_P(CordRingBuildTest, PrependStringHavingExtra) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz1234";
+  absl::string_view str2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRep* flat = RemovePrefix(26, MakeFlat(str1));
+  CordRepRing* ring = CreateWithCapacity(flat, 0);
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, str2));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(result->length, Eq(4 + str2.size()));
+  if (GetParam().refcount_is_one) {
+    EXPECT_THAT(ToFlats(result), ElementsAre(StrCat(str2, "1234")));
+  } else {
+    EXPECT_THAT(ToFlats(result), ElementsAre(str2, "1234"));
+  }
+}
+
+TEST_P(CordRingBuildTest, PrependStringHavingSharedExtra) {
+  absl::string_view str1 = "123456789_ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  absl::string_view str2 = "abcdefghij";
+  absl::string_view str1a = str1.substr(10);
+  for (int shared_type = 1; shared_type < 2; ++shared_type) {
+    SCOPED_TRACE(absl::StrCat("Shared extra type ", shared_type));
+
+    // Create a flat that is shared in some way.
+    CordRep* flat = nullptr;
+    CordRep* flat1 = nullptr;
+    if (shared_type == 1) {
+      // Shared flat inside private substring
+      flat = RemovePrefix(10, flat1 = CordRep::Ref(MakeFlat(str1)));
+    } else {
+      // Private flat inside shared substring
+      flat = CordRep::Ref(RemovePrefix(10, MakeFlat(str1, 100)));
+    }
+
+    CordRepRing* ring = CreateWithCapacity(flat, 1);
+    CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, str2));
+    ASSERT_THAT(result, IsValidRingBuffer());
+    EXPECT_THAT(result->length, Eq(str1a.size() + str2.size()));
+    EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+    EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+    EXPECT_THAT(ToFlats(result), ElementsAre(str2, str1a));
+    CordRep::Unref(shared_type == 1 ? flat1 : flat);
+  }
+}
+
+TEST_P(CordRingBuildTest, PrependStringWithExtra) {
+  absl::string_view str1 = "1234";
+  absl::string_view str2 = "1234567890";
+  absl::string_view str3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  CordRepRing* ring = CreateWithCapacity(MakeExternal(str1), 1);
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, str2, 26));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  result = CordRepRing::Prepend(result, str3);
+  EXPECT_THAT(result->length, Eq(str1.size() + str2.size() + str3.size()));
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre(StrCat(str3, str2), str1));
+}
+
+TEST_P(CordRingBuildTest, AppendPrependStringMix) {
+  const auto& flats = kFoxFlats;
+  CordRepRing* ring = CreateWithCapacity(MakeFlat(flats[4]), 8);
+  CordRepRing* result = ring;
+  for (int i = 1; i <= 4; ++i) {
+    result = CordRepRing::Prepend(result, flats[4 - i]);
+    result = CordRepRing::Append(result, flats[4 + i]);
+  }
+  NeedsUnref(result);
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(ToString(result), kFox);
+}
+
+TEST_P(CordRingBuildTest, AppendPrependStringMixWithExtra) {
+  const auto& flats = kFoxFlats;
+  CordRepRing* ring = CreateWithCapacity(MakeFlat(flats[4], 100), 8);
+  CordRepRing* result = ring;
+  for (int i = 1; i <= 4; ++i) {
+    result = CordRepRing::Prepend(result, flats[4 - i], 100);
+    result = CordRepRing::Append(result, flats[4 + i], 100);
+  }
+  NeedsUnref(result);
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  if (GetParam().refcount_is_one) {
+    EXPECT_THAT(ToFlats(result),
+                ElementsAre("The quick brown fox ", "jumps over the lazy dog"));
+  } else {
+    EXPECT_THAT(ToFlats(result), ElementsAre("The quick brown fox ", "jumps ",
+                                             "over the lazy dog"));
+  }
+}
+
+TEST_P(CordRingBuildTest, AppendPrependStringMixWithPrependedExtra) {
+  const auto& flats = kFoxFlats;
+  CordRep* flat = MakeFlat(StrCat(std::string(50, '.'), flats[4]), 50);
+  CordRepRing* ring = CreateWithCapacity(RemovePrefix(50, flat), 0);
+  CordRepRing* result = ring;
+  for (int i = 1; i <= 4; ++i) {
+    result = CordRepRing::Prepend(result, flats[4 - i], 100);
+    result = CordRepRing::Append(result, flats[4 + i], 100);
+  }
+  result = NeedsUnref(result);
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+  if (GetParam().refcount_is_one) {
+    EXPECT_THAT(ToFlats(result), ElementsAre(kFox));
+  } else {
+    EXPECT_THAT(ToFlats(result), ElementsAre("The quick brown fox ", "jumps ",
+                                             "over the lazy dog"));
+  }
+}
+
+TEST_P(CordRingSubTest, SubRing) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  string_view all = kFox;
+  for (size_t offset = 0; offset < all.size() - 1; ++offset) {
+    CordRepRing* ring = RefIfShared(FromFlats(flats, composition));
+    CordRepRing* result = CordRepRing::SubRing(ring, offset, 0);
+    EXPECT_THAT(result, nullptr);
+
+    for (size_t len = 1; len < all.size() - offset; ++len) {
+      ring = RefIfShared(FromFlats(flats, composition));
+      result = NeedsUnref(CordRepRing::SubRing(ring, offset, len));
+      ASSERT_THAT(result, IsValidRingBuffer());
+      ASSERT_THAT(result, EqIfPrivate(GetParam(), ring));
+      ASSERT_THAT(result, NeIfShared(GetParam(), ring));
+      ASSERT_THAT(ToString(result), Eq(all.substr(offset, len)));
+    }
+  }
+}
+
+TEST_P(CordRingSubTest, SubRingFromLargeExternal) {
+  auto composition = RandomComposition();
+  std::string large_string(1 << 20, '.');
+  const char* flats[] = {
+      "abcdefghijklmnopqrstuvwxyz",
+      large_string.c_str(),
+      "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
+  };
+  std::string buffer = absl::StrCat(flats[0], flats[1], flats[2]);
+  absl::string_view all = buffer;
+  for (size_t offset = 0; offset < 30; ++offset) {
+    CordRepRing* ring = RefIfShared(FromFlats(flats, composition));
+    CordRepRing* result = CordRepRing::SubRing(ring, offset, 0);
+    EXPECT_THAT(result, nullptr);
+
+    for (size_t len = all.size() - 30; len < all.size() - offset; ++len) {
+      ring = RefIfShared(FromFlats(flats, composition));
+      result = NeedsUnref(CordRepRing::SubRing(ring, offset, len));
+      ASSERT_THAT(result, IsValidRingBuffer());
+      ASSERT_THAT(result, EqIfPrivate(GetParam(), ring));
+      ASSERT_THAT(result, NeIfShared(GetParam(), ring));
+      auto str = ToString(result);
+      ASSERT_THAT(str, SizeIs(len));
+      ASSERT_THAT(str, Eq(all.substr(offset, len)));
+    }
+  }
+}
+
+TEST_P(CordRingSubTest, RemovePrefix) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  string_view all = kFox;
+  CordRepRing* ring = RefIfShared(FromFlats(flats, composition));
+  CordRepRing* result = CordRepRing::RemovePrefix(ring, all.size());
+  EXPECT_THAT(result, nullptr);
+
+  for (size_t len = 1; len < all.size(); ++len) {
+    ring = RefIfShared(FromFlats(flats, composition));
+    result = NeedsUnref(CordRepRing::RemovePrefix(ring, len));
+    ASSERT_THAT(result, IsValidRingBuffer());
+    EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+    ASSERT_THAT(result, NeIfShared(GetParam(), ring));
+    EXPECT_THAT(ToString(result), Eq(all.substr(len)));
+  }
+}
+
+TEST_P(CordRingSubTest, RemovePrefixFromLargeExternal) {
+  CordRepExternal* external1 = MakeFakeExternal(1 << 20);
+  CordRepExternal* external2 = MakeFakeExternal(1 << 20);
+  CordRepRing* ring = CordRepRing::Create(external1, 1);
+  ring = CordRepRing::Append(ring, external2);
+  CordRepRing* result = NeedsUnref(CordRepRing::RemovePrefix(ring, 1 << 16));
+  EXPECT_THAT(
+      ToRawFlats(result),
+      ElementsAre(
+          not_a_string_view(external1->base, 1 << 20).remove_prefix(1 << 16),
+          not_a_string_view(external2->base, 1 << 20)));
+}
+
+TEST_P(CordRingSubTest, RemoveSuffix) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  string_view all = kFox;
+  CordRepRing* ring = RefIfShared(FromFlats(flats, composition));
+  CordRepRing* result = CordRepRing::RemoveSuffix(ring, all.size());
+  EXPECT_THAT(result, nullptr);
+
+  for (size_t len = 1; len < all.size(); ++len) {
+    ring = RefIfShared(FromFlats(flats, composition));
+    result = NeedsUnref(CordRepRing::RemoveSuffix(ring, len));
+    ASSERT_THAT(result, IsValidRingBuffer());
+    ASSERT_THAT(result, EqIfPrivate(GetParam(), ring));
+    ASSERT_THAT(result, NeIfShared(GetParam(), ring));
+    ASSERT_THAT(ToString(result), Eq(all.substr(0, all.size() - len)));
+  }
+}
+
+TEST_P(CordRingSubTest, AppendRing) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats).subspan(1);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat(kFoxFlats[0]), flats.size());
+  CordRepRing* child = FromFlats(flats, composition);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivate(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAreArray(kFoxFlats));
+}
+
+TEST_P(CordRingBuildInputTest, AppendRingWithFlatOffset) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Head"), flats.size());
+  CordRep* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = RemovePrefix(10, child);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("Head", "brown ", "fox ", "jumps ",
+                                           "over ", "the ", "lazy ", "dog"));
+}
+
+TEST_P(CordRingBuildInputTest, AppendRingWithBrokenOffset) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Head"), flats.size());
+  CordRep* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = RemovePrefix(21, child);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result),
+              ElementsAre("Head", "umps ", "over ", "the ", "lazy ", "dog"));
+}
+
+TEST_P(CordRingBuildInputTest, AppendRingWithFlatLength) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Head"), flats.size());
+  CordRep* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = RemoveSuffix(8, child);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("Head", "The ", "quick ", "brown ",
+                                           "fox ", "jumps ", "over ", "the "));
+}
+
+TEST_P(CordRingBuildTest, AppendRingWithBrokenFlatLength) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Head"), flats.size());
+  CordRep* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = RemoveSuffix(15, child);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("Head", "The ", "quick ", "brown ",
+                                           "fox ", "jumps ", "ov"));
+}
+
+TEST_P(CordRingBuildTest, AppendRingMiddlePiece) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Head"), flats.size());
+  CordRep* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = MakeSubstring(7, child->length - 27, child);
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result),
+              ElementsAre("Head", "ck ", "brown ", "fox ", "jum"));
+}
+
+TEST_P(CordRingBuildTest, AppendRingSinglePiece) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Head"), flats.size());
+  CordRep* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputShared(MakeSubstring(11, 3, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("Head", "row"));
+}
+
+TEST_P(CordRingBuildInputTest, AppendRingSinglePieceWithPrefix) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  size_t extra_capacity = 1 + (GetParam().with_capacity ? flats.size() : 0);
+  CordRepRing* ring = CordRepRing::Create(MakeFlat("Head"), extra_capacity);
+  ring->SetCapacityForTesting(1 + extra_capacity);
+  ring = RefIfShared(CordRepRing::Prepend(ring, MakeFlat("Prepend")));
+  assert(ring->IsValid(std::cout));
+  CordRepRing* child = RefIfInputSharedIndirect(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputShared(MakeSubstring(11, 3, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Append(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("Prepend", "Head", "row"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRing) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto fox = MakeSpan(kFoxFlats);
+  auto flats = MakeSpan(fox).subspan(0, fox.size() - 1);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat(fox.back()), flats.size());
+  CordRepRing* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAreArray(kFoxFlats));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingWithFlatOffset) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Tail"), flats.size());
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputSharedIndirect(RemovePrefix(10, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("brown ", "fox ", "jumps ", "over ",
+                                           "the ", "lazy ", "dog", "Tail"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingWithBrokenOffset) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Tail"), flats.size());
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputSharedIndirect(RemovePrefix(21, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result),
+              ElementsAre("umps ", "over ", "the ", "lazy ", "dog", "Tail"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingWithFlatLength) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Tail"), flats.size());
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputSharedIndirect(RemoveSuffix(8, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("The ", "quick ", "brown ", "fox ",
+                                           "jumps ", "over ", "the ", "Tail"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingWithBrokenFlatLength) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Tail"), flats.size());
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputSharedIndirect(RemoveSuffix(15, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("The ", "quick ", "brown ", "fox ",
+                                           "jumps ", "ov", "Tail"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingMiddlePiece) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Tail"), flats.size());
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped =
+      RefIfInputSharedIndirect(MakeSubstring(7, child->length - 27, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result),
+              ElementsAre("ck ", "brown ", "fox ", "jum", "Tail"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingSinglePiece) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CreateWithCapacity(MakeFlat("Tail"), flats.size());
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputSharedIndirect(MakeSubstring(11, 3, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("row", "Tail"));
+}
+
+TEST_P(CordRingBuildInputTest, PrependRingSinglePieceWithPrefix) {
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  auto flats = MakeSpan(kFoxFlats);
+  size_t extra_capacity = 1 + (GetParam().with_capacity ? flats.size() : 0);
+  CordRepRing* ring = CordRepRing::Create(MakeFlat("Tail"), extra_capacity);
+  ring->SetCapacityForTesting(1 + extra_capacity);
+  ring = RefIfShared(CordRepRing::Prepend(ring, MakeFlat("Prepend")));
+  CordRep* child = RefIfInputShared(FromFlats(flats, composition));
+  CordRep* stripped = RefIfInputSharedIndirect(MakeSubstring(11, 3, child));
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, stripped));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  EXPECT_THAT(result, EqIfPrivateAndCapacity(GetParam(), ring));
+  EXPECT_THAT(result, NeIfShared(GetParam(), ring));
+  EXPECT_THAT(ToFlats(result), ElementsAre("row", "Prepend", "Tail"));
+}
+
+TEST_F(CordRingTest, Find) {
+  constexpr const char* flats[] = {
+      "abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+      "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+      "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  CordRepRing* ring = NeedsUnref(FromFlats(flats, composition));
+  std::string value = ToString(ring);
+  for (int i = 0; i < value.length(); ++i) {
+    CordRepRing::Position found = ring->Find(i);
+    auto data = ring->entry_data(found.index);
+    ASSERT_THAT(found.offset, Lt(data.length()));
+    ASSERT_THAT(data[found.offset], Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, FindWithHint) {
+  constexpr const char* flats[] = {
+      "abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+      "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+      "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  CordRepRing* ring = NeedsUnref(FromFlats(flats, composition));
+  std::string value = ToString(ring);
+
+#if defined(GTEST_HAS_DEATH_TEST)
+  // Test hint beyond valid position
+  index_type head = ring->head();
+  EXPECT_DEBUG_DEATH(ring->Find(ring->advance(head), 0), ".*");
+  EXPECT_DEBUG_DEATH(ring->Find(ring->advance(head), 9), ".*");
+  EXPECT_DEBUG_DEATH(ring->Find(ring->advance(head, 3), 24), ".*");
+#endif
+
+  int flat_pos = 0;
+  size_t flat_offset = 0;
+  for (auto sflat : flats) {
+    string_view flat(sflat);
+    for (int offset = 0; offset < flat.length(); ++offset) {
+      for (int start = 0; start <= flat_pos; ++start) {
+        index_type hint = ring->advance(ring->head(), start);
+        CordRepRing::Position found = ring->Find(hint, flat_offset + offset);
+        ASSERT_THAT(found.index, Eq(ring->advance(ring->head(), flat_pos)));
+        ASSERT_THAT(found.offset, Eq(offset));
+      }
+    }
+    ++flat_pos;
+    flat_offset += flat.length();
+  }
+}
+
+TEST_F(CordRingTest, FindInLargeRing) {
+  constexpr const char* flats[] = {
+      "abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+      "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+      "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  CordRepRing* ring = FromFlats(flats, composition);
+  for (int i = 0; i < 13; ++i) {
+    ring = CordRepRing::Append(ring, FromFlats(flats, composition));
+  }
+  NeedsUnref(ring);
+  std::string value = ToString(ring);
+  for (int i = 0; i < value.length(); ++i) {
+    CordRepRing::Position pos = ring->Find(i);
+    auto data = ring->entry_data(pos.index);
+    ASSERT_THAT(pos.offset, Lt(data.length()));
+    ASSERT_THAT(data[pos.offset], Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, FindTail) {
+  constexpr const char* flats[] = {
+      "abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+      "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+      "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  CordRepRing* ring = NeedsUnref(FromFlats(flats, composition));
+  std::string value = ToString(ring);
+
+  for (int i = 0; i < value.length(); ++i) {
+    CordRepRing::Position pos = ring->FindTail(i + 1);
+    auto data = ring->entry_data(ring->retreat(pos.index));
+    ASSERT_THAT(pos.offset, Lt(data.length()));
+    ASSERT_THAT(data[data.length() - pos.offset - 1], Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, FindTailWithHint) {
+  constexpr const char* flats[] = {
+      "abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+      "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+      "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  CordRepRing* ring = NeedsUnref(FromFlats(flats, composition));
+  std::string value = ToString(ring);
+
+  // Test hint beyond valid position
+#if defined(GTEST_HAS_DEATH_TEST)
+  index_type head = ring->head();
+  EXPECT_DEBUG_DEATH(ring->FindTail(ring->advance(head), 1), ".*");
+  EXPECT_DEBUG_DEATH(ring->FindTail(ring->advance(head), 10), ".*");
+  EXPECT_DEBUG_DEATH(ring->FindTail(ring->advance(head, 3), 26), ".*");
+#endif
+
+  for (int i = 0; i < value.length(); ++i) {
+    CordRepRing::Position pos = ring->FindTail(i + 1);
+    auto data = ring->entry_data(ring->retreat(pos.index));
+    ASSERT_THAT(pos.offset, Lt(data.length()));
+    ASSERT_THAT(data[data.length() - pos.offset - 1], Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, FindTailInLargeRing) {
+  constexpr const char* flats[] = {
+      "abcdefghij", "klmnopqrst", "uvwxyz",     "ABCDEFGHIJ",
+      "KLMNOPQRST", "UVWXYZ",     "1234567890", "~!@#$%^&*()_",
+      "+-=",        "[]\\{}|;':", ",/<>?",      "."};
+  auto composition = RandomComposition();
+  SCOPED_TRACE(ToString(composition));
+  CordRepRing* ring = FromFlats(flats, composition);
+  for (int i = 0; i < 13; ++i) {
+    ring = CordRepRing::Append(ring, FromFlats(flats, composition));
+  }
+  NeedsUnref(ring);
+  std::string value = ToString(ring);
+  for (int i = 0; i < value.length(); ++i) {
+    CordRepRing::Position pos = ring->FindTail(i + 1);
+    auto data = ring->entry_data(ring->retreat(pos.index));
+    ASSERT_THAT(pos.offset, Lt(data.length()));
+    ASSERT_THAT(data[data.length() - pos.offset - 1], Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, GetCharacter) {
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = CordRepRing::Create(MakeFlat("Tail"), flats.size());
+  CordRep* child = FromFlats(flats, kAppend);
+  CordRepRing* result = NeedsUnref(CordRepRing::Prepend(ring, child));
+  std::string value = ToString(result);
+  for (int i = 0; i < value.length(); ++i) {
+    ASSERT_THAT(result->GetCharacter(i), Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, GetCharacterWithSubstring) {
+  absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
+  auto* child = MakeSubstring(4, 20, MakeFlat(str1));
+  CordRepRing* result = NeedsUnref(CordRepRing::Create(child));
+  ASSERT_THAT(result, IsValidRingBuffer());
+  std::string value = ToString(result);
+  for (int i = 0; i < value.length(); ++i) {
+    ASSERT_THAT(result->GetCharacter(i), Eq(value[i]));
+  }
+}
+
+TEST_F(CordRingTest, IsFlatSingleFlat) {
+  for (bool external : {false, true}) {
+    SCOPED_TRACE(external ? "With External" : "With Flat");
+    absl::string_view str = "Hello world";
+    CordRep* rep = external ? MakeExternal(str) : MakeFlat(str);
+    CordRepRing* ring = NeedsUnref(CordRepRing::Create(rep));
+
+    // The ring is a single non-fragmented flat:
+    absl::string_view fragment;
+    EXPECT_TRUE(ring->IsFlat(nullptr));
+    EXPECT_TRUE(ring->IsFlat(&fragment));
+    EXPECT_THAT(fragment, Eq("Hello world"));
+    fragment = "";
+    EXPECT_TRUE(ring->IsFlat(0, 11, nullptr));
+    EXPECT_TRUE(ring->IsFlat(0, 11, &fragment));
+    EXPECT_THAT(fragment, Eq("Hello world"));
+
+    // Arbitrary ranges within the flat must also report true.
+    EXPECT_TRUE(ring->IsFlat(1, 4, &fragment));
+    EXPECT_THAT(fragment, Eq("ello"));
+    EXPECT_TRUE(ring->IsFlat(6, 5, &fragment));
+    EXPECT_THAT(fragment, Eq("world"));
+  }
+}
+
+TEST_F(CordRingTest, IsFlatMultiFlat) {
+  for (bool external : {false, true}) {
+    SCOPED_TRACE(external ? "With External" : "With Flat");
+    absl::string_view str1 = "Hello world";
+    absl::string_view str2 = "Halt and catch fire";
+    CordRep* rep1 = external ? MakeExternal(str1) : MakeFlat(str1);
+    CordRep* rep2 = external ? MakeExternal(str2) : MakeFlat(str2);
+    CordRepRing* ring = CordRepRing::Append(CordRepRing::Create(rep1), rep2);
+    NeedsUnref(ring);
+
+    // The ring is fragmented, so IsFlat() on the entire cord must be false.
+    EXPECT_FALSE(ring->IsFlat(nullptr));
+    absl::string_view fragment = "Don't touch this";
+    EXPECT_FALSE(ring->IsFlat(&fragment));
+    EXPECT_THAT(fragment, Eq("Don't touch this"));
+
+    // Check ranges that exactly cover each of the two flats.
+    EXPECT_TRUE(ring->IsFlat(0, 11, &fragment));
+    EXPECT_THAT(fragment, Eq("Hello world"));
+    EXPECT_TRUE(ring->IsFlat(11, 19, &fragment));
+    EXPECT_THAT(fragment, Eq("Halt and catch fire"));
+
+    // Check for arbitrary partial range inside each flat.
+    EXPECT_TRUE(ring->IsFlat(1, 4, &fragment));
+    EXPECT_THAT(fragment, "ello");
+    EXPECT_TRUE(ring->IsFlat(26, 4, &fragment));
+    EXPECT_THAT(fragment, "fire");
+
+    // Check ranges spanning across both flats
+    fragment = "Don't touch this";
+    EXPECT_FALSE(ring->IsFlat(1, 18, &fragment));
+    EXPECT_FALSE(ring->IsFlat(10, 2, &fragment));
+    EXPECT_THAT(fragment, Eq("Don't touch this"));
+  }
+}
+
+TEST_F(CordRingTest, Dump) {
+  std::stringstream ss;
+  auto flats = MakeSpan(kFoxFlats);
+  CordRepRing* ring = NeedsUnref(FromFlats(flats, kPrepend));
+  ss << *ring;
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/cord_test.cc b/abseil-cpp/absl/strings/cord_test.cc
index 4443c82..36e397e 100644
--- a/abseil-cpp/absl/strings/cord_test.cc
+++ b/abseil-cpp/absl/strings/cord_test.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/cord.h"
 
 #include <algorithm>
@@ -14,19 +28,40 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/casts.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/fixed_array.h"
+#include "absl/hash/hash.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
+#include "absl/random/random.h"
 #include "absl/strings/cord_test_helpers.h"
+#include "absl/strings/cordz_test_helpers.h"
+#include "absl/strings/match.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_format.h"
 #include "absl/strings/string_view.h"
 
+// convenience local constants
+static constexpr auto FLAT = absl::cord_internal::FLAT;
+static constexpr auto MAX_FLAT_TAG = absl::cord_internal::MAX_FLAT_TAG;
+
 typedef std::mt19937_64 RandomEngine;
 
+using absl::cord_internal::CordRep;
+using absl::cord_internal::CordRepBtree;
+using absl::cord_internal::CordRepConcat;
+using absl::cord_internal::CordRepCrc;
+using absl::cord_internal::CordRepExternal;
+using absl::cord_internal::CordRepFlat;
+using absl::cord_internal::CordRepSubstring;
+using absl::cord_internal::CordzUpdateTracker;
+using absl::cord_internal::kFlatOverhead;
+using absl::cord_internal::kMaxFlatLength;
+using ::testing::ElementsAre;
+using ::testing::Le;
+
 static std::string RandomLowercaseString(RandomEngine* rng);
 static std::string RandomLowercaseString(RandomEngine* rng, size_t length);
 
@@ -167,12 +202,130 @@
       const Cord& c, absl::FunctionRef<void(absl::string_view)> callback) {
     c.ForEachChunk(callback);
   }
+
+  static bool IsTree(const Cord& c) { return c.contents_.is_tree(); }
+  static CordRep* Tree(const Cord& c) { return c.contents_.tree(); }
+
+  static cord_internal::CordzInfo* GetCordzInfo(const Cord& c) {
+    return c.contents_.cordz_info();
+  }
+
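+  // Wraps `src`'s tree in a substring rep without flattening it; `src` must
+  // be tree-backed and must not carry an expected checksum (see the CHECKs).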
+  static Cord MakeSubstring(Cord src, size_t offset, size_t length) {
+    CHECK(src.contents_.is_tree()) << "Can not be inlined";
+    CHECK(!src.ExpectedChecksum().has_value()) << "Can not be hardened";
+    Cord cord;
+    auto* tree = cord_internal::SkipCrcNode(src.contents_.tree());
+    auto* rep = CordRepSubstring::Create(CordRep::Ref(tree), offset, length);
+    cord.contents_.EmplaceTree(rep, CordzUpdateTracker::kSubCord);
+    return cord;
+  }
 };
 
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-TEST(Cord, AllFlatSizes) {
+// The CordTest fixture runs all tests with and without expected CRCs being
+// set on the subject Cords.
+class CordTest : public testing::TestWithParam<int> {
+ public:
+  // Returns true if the test is running with expected CRCs (hardening)
+  // enabled.
+  bool UseCrc() const { return GetParam() == 1; }
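+  // Sets an expected checksum on `c` when running a hardened variant.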
+  void MaybeHarden(absl::Cord& c) {
+    if (UseCrc()) {
+      c.SetExpectedChecksum(1);
+    }
+  }
+  absl::Cord MaybeHardened(absl::Cord c) {
+    MaybeHarden(c);
+    return c;
+  }
+
+  // Returns human readable string representation of the test parameter.
+  static std::string ToString(testing::TestParamInfo<int> param) {
+    switch (param.param) {
+      case 0:
+        return "Btree";
+      case 1:
+        return "BtreeHardened";
+      default:
+        assert(false);
+        return "???";
+    }
+  }
+};
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Values(0, 1),
+                         CordTest::ToString);
+
+TEST(CordRepFlat, AllFlatCapacities) {
+  // Explicitly and redundantly assert built-in min/max limits
+  static_assert(absl::cord_internal::kFlatOverhead < 32, "");
+  static_assert(absl::cord_internal::kMinFlatSize == 32, "");
+  static_assert(absl::cord_internal::kMaxLargeFlatSize == 256 << 10, "");
+  EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(FLAT), 32);
+  EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(MAX_FLAT_TAG), 256 << 10);
+
+  // Verify that all tags map back and forth perfectly, and that sizes are
+  // monotonically increasing.
+  size_t last_size = 0;
+  for (int tag = FLAT; tag <= MAX_FLAT_TAG; ++tag) {
+    size_t size = absl::cord_internal::TagToAllocatedSize(tag);
+    ASSERT_GT(size, last_size);
+    ASSERT_EQ(absl::cord_internal::AllocatedSizeToTag(size), tag);
+    last_size = size;
+  }
+
+  // All flat sizes from 32 to 512 bytes use 8-byte granularity.
+  for (size_t size = 32; size <= 512; size += 8) {
+    ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
+    uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
+    ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+  }
+
+  // All flat sizes from 512 to 8192 bytes use 64-byte granularity.
+  for (size_t size = 512; size <= 8192; size += 64) {
+    ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
+    uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
+    ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+  }
+
+  // All flat sizes from 8 KB to 256 KB use 4 KB granularity.
+  for (size_t size = 8192; size <= 256 * 1024; size += 4 * 1024) {
+    ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
+    uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
+    ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+  }
+}
+
+TEST(CordRepFlat, MaxFlatSize) {
+  CordRepFlat* flat = CordRepFlat::New(kMaxFlatLength);
+  EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
+  CordRep::Unref(flat);
+
+  flat = CordRepFlat::New(kMaxFlatLength * 4);
+  EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
+  CordRep::Unref(flat);
+}
+
+TEST(CordRepFlat, MaxLargeFlatSize) {
+  const size_t size = 256 * 1024 - kFlatOverhead;
+  CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), size);
+  EXPECT_GE(flat->Capacity(), size);
+  CordRep::Unref(flat);
+}
+
+TEST(CordRepFlat, AllFlatSizes) {
+  const size_t kMaxSize = 256 * 1024;
+  for (size_t size = 32; size <= kMaxSize; size *= 2) {
+    const size_t length = size - kFlatOverhead - 1;
+    CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), length);
+    EXPECT_GE(flat->Capacity(), length);
+    memset(flat->Data(), 0xCD, flat->Capacity());
+    CordRep::Unref(flat);
+  }
+}
+
+TEST_P(CordTest, AllFlatSizes) {
   using absl::strings_internal::CordTestAccess;
 
   for (size_t s = 0; s < CordTestAccess::MaxFlatLength(); s++) {
@@ -183,6 +336,7 @@
     }
 
     absl::Cord dst(src);
+    MaybeHarden(dst);
     EXPECT_EQ(std::string(dst), src) << s;
   }
 }
@@ -190,7 +344,7 @@
 // We create a Cord at least 128GB in size using the fact that Cords can
 // internally reference-count; thus the Cord is enormous without actually
 // consuming very much memory.
-TEST(GigabyteCord, FromExternal) {
+TEST_P(CordTest, GigabyteCordFromExternal) {
   const size_t one_gig = 1024U * 1024U * 1024U;
   size_t max_size = 2 * one_gig;
   if (sizeof(max_size) > 4) max_size = 128 * one_gig;
@@ -207,7 +361,6 @@
   // caused crashes in production.  We grow exponentially so that the code will
   // execute in a reasonable amount of time.
   absl::Cord c;
-  ABSL_RAW_LOG(INFO, "Made a Cord with %zu bytes!", c.size());
   c.Append(from);
   while (c.size() < max_size) {
     c.Append(c);
@@ -215,12 +368,13 @@
     c.Append(from);
     c.Append(from);
     c.Append(from);
+    MaybeHarden(c);
   }
 
   for (int i = 0; i < 1024; ++i) {
     c.Append(from);
   }
-  ABSL_RAW_LOG(INFO, "Made a Cord with %zu bytes!", c.size());
+  LOG(INFO) << "Made a Cord with " << c.size() << " bytes!";
   // Note: on a 32-bit build, this comes out to   2,818,048,000 bytes.
   // Note: on a 64-bit build, this comes out to 171,932,385,280 bytes.
 }
@@ -240,9 +394,11 @@
 extern bool my_unique_true_boolean;
 bool my_unique_true_boolean = true;
 
-TEST(Cord, Assignment) {
+TEST_P(CordTest, Assignment) {
   absl::Cord x(absl::string_view("hi there"));
   absl::Cord y(x);
+  MaybeHarden(y);
+  ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt);
   ASSERT_EQ(std::string(x), "hi there");
   ASSERT_EQ(std::string(y), "hi there");
   ASSERT_TRUE(x == y);
@@ -294,8 +450,9 @@
   }
 }
 
-TEST(Cord, StartsEndsWith) {
+TEST_P(CordTest, StartsEndsWith) {
   absl::Cord x(absl::string_view("abcde"));
+  MaybeHarden(x);
   absl::Cord empty("");
 
   ASSERT_TRUE(x.StartsWith(absl::Cord("abcde")));
@@ -327,13 +484,14 @@
   ASSERT_TRUE(!empty.EndsWith("xyz"));
 }
 
-TEST(Cord, Subcord) {
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+TEST_P(CordTest, Subcord) {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
   const std::string s = RandomLowercaseString(&rng, 1024);
 
   absl::Cord a;
   AppendWithFragments(s, &rng, &a);
-  ASSERT_EQ(s.size(), a.size());
+  MaybeHarden(a);
+  ASSERT_EQ(s, std::string(a));
 
   // Check subcords of a, from a variety of interesting points.
   std::set<size_t> positions;
@@ -351,9 +509,12 @@
     for (size_t end_pos : positions) {
       if (end_pos < pos || end_pos > a.size()) continue;
       absl::Cord sa = a.Subcord(pos, end_pos - pos);
-      EXPECT_EQ(absl::string_view(s).substr(pos, end_pos - pos),
+      ASSERT_EQ(absl::string_view(s).substr(pos, end_pos - pos),
                 std::string(sa))
           << a;
+      if (pos != 0 || end_pos != a.size()) {
+        ASSERT_EQ(sa.ExpectedChecksum(), absl::nullopt);
+      }
     }
   }
 
@@ -363,7 +524,7 @@
   for (size_t pos = 0; pos <= sh.size(); ++pos) {
     for (size_t n = 0; n <= sh.size() - pos; ++n) {
       absl::Cord sc = c.Subcord(pos, n);
-      EXPECT_EQ(sh.substr(pos, n), std::string(sc)) << c;
+      ASSERT_EQ(sh.substr(pos, n), std::string(sc)) << c;
     }
   }
 
@@ -373,7 +534,7 @@
   while (sa.size() > 1) {
     sa = sa.Subcord(1, sa.size() - 2);
     ss = ss.substr(1, ss.size() - 2);
-    EXPECT_EQ(ss, std::string(sa)) << a;
+    ASSERT_EQ(ss, std::string(sa)) << a;
     if (HasFailure()) break;  // halt cascade
   }
 
@@ -388,15 +549,24 @@
   EXPECT_TRUE(sa.empty());
 }
 
-TEST(Cord, Swap) {
+TEST_P(CordTest, Swap) {
   absl::string_view a("Dexter");
   absl::string_view b("Mandark");
   absl::Cord x(a);
   absl::Cord y(b);
+  MaybeHarden(x);
   swap(x, y);
+  if (UseCrc()) {
+    ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt);
+    ASSERT_EQ(y.ExpectedChecksum(), 1);
+  }
   ASSERT_EQ(x, absl::Cord(b));
   ASSERT_EQ(y, absl::Cord(a));
   x.swap(y);
+  if (UseCrc()) {
+    ASSERT_EQ(x.ExpectedChecksum(), 1);
+    ASSERT_EQ(y.ExpectedChecksum(), absl::nullopt);
+  }
   ASSERT_EQ(x, absl::Cord(a));
   ASSERT_EQ(y, absl::Cord(b));
 }
@@ -420,56 +590,394 @@
   }
 }
 
-TEST(Cord, CopyToString) {
-  VerifyCopyToString(absl::Cord());
-  VerifyCopyToString(absl::Cord("small cord"));
-  VerifyCopyToString(
+TEST_P(CordTest, CopyToString) {
+  VerifyCopyToString(absl::Cord());  // empty cords cannot carry CRCs
+  VerifyCopyToString(MaybeHardened(absl::Cord("small cord")));
+  VerifyCopyToString(MaybeHardened(
       absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ",
-                                "copying ", "to ", "a ", "string."}));
+                                "copying ", "to ", "a ", "string."})));
 }
 
-TEST(TryFlat, Empty) {
+TEST_P(CordTest, AppendEmptyBuffer) {
+  absl::Cord cord;
+  cord.Append(absl::CordBuffer());
+  cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
+}
+
+TEST_P(CordTest, AppendEmptyBufferToFlat) {
+  absl::Cord cord(std::string(2000, 'x'));
+  cord.Append(absl::CordBuffer());
+  cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
+}
+
+TEST_P(CordTest, AppendEmptyBufferToTree) {
+  absl::Cord cord(std::string(2000, 'x'));
+  cord.Append(std::string(2000, 'y'));
+  cord.Append(absl::CordBuffer());
+  cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
+}
+
+TEST_P(CordTest, AppendSmallBuffer) {
+  absl::Cord cord;
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+  ASSERT_THAT(buffer.capacity(), Le(15));
+  memcpy(buffer.data(), "Abc", 3);
+  buffer.SetLength(3);
+  cord.Append(std::move(buffer));
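+  // Append is expected to leave the moved-from buffer empty but still usable;
+  // the NOLINT markers below suppress use-after-move warnings for the checks.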
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+  memcpy(buffer.data(), "defgh", 5);
+  buffer.SetLength(5);
+  cord.Append(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  EXPECT_THAT(cord.Chunks(), ElementsAre("Abcdefgh"));
+}
+
+TEST_P(CordTest, AppendAndPrependBufferArePrecise) {
+  // Create a cord large enough to force 40KB flats.
+  std::string test_data(absl::cord_internal::kMaxFlatLength * 10, 'x');
+  absl::Cord cord1(test_data);
+  absl::Cord cord2(test_data);
+  const size_t size1 = cord1.EstimatedMemoryUsage();
+  const size_t size2 = cord2.EstimatedMemoryUsage();
+
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+  memcpy(buffer.data(), "Abc", 3);
+  buffer.SetLength(3);
+  cord1.Append(std::move(buffer));
+
+  buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+  memcpy(buffer.data(), "Abc", 3);
+  buffer.SetLength(3);
+  cord2.Prepend(std::move(buffer));
+
+#ifdef NDEBUG
+  // Allow 32 bytes for a new CordRepFlat, and 128 bytes for 'glue nodes'
+  constexpr size_t kMaxDelta = 128 + 32;
+#else
+  // Allow 256 bytes extra for 'allocation debug overhead'
+  constexpr size_t kMaxDelta = 128 + 32 + 256;
+#endif
+
+  EXPECT_LE(cord1.EstimatedMemoryUsage() - size1, kMaxDelta);
+  EXPECT_LE(cord2.EstimatedMemoryUsage() - size2, kMaxDelta);
+
+  EXPECT_EQ(cord1, absl::StrCat(test_data, "Abc"));
+  EXPECT_EQ(cord2, absl::StrCat("Abc", test_data));
+}
+
+TEST_P(CordTest, PrependSmallBuffer) {
+  absl::Cord cord;
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+  ASSERT_THAT(buffer.capacity(), Le(15));
+  memcpy(buffer.data(), "Abc", 3);
+  buffer.SetLength(3);
+  cord.Prepend(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+  memcpy(buffer.data(), "defgh", 5);
+  buffer.SetLength(5);
+  cord.Prepend(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  EXPECT_THAT(cord.Chunks(), ElementsAre("defghAbc"));
+}
+
+TEST_P(CordTest, AppendLargeBuffer) {
+  absl::Cord cord;
+
+  std::string s1(700, '1');
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size());
+  memcpy(buffer.data(), s1.data(), s1.size());
+  buffer.SetLength(s1.size());
+  cord.Append(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  std::string s2(1000, '2');
+  buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size());
+  memcpy(buffer.data(), s2.data(), s2.size());
+  buffer.SetLength(s2.size());
+  cord.Append(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  EXPECT_THAT(cord.Chunks(), ElementsAre(s1, s2));
+}
+
+TEST_P(CordTest, PrependLargeBuffer) {
+  absl::Cord cord;
+
+  std::string s1(700, '1');
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size());
+  memcpy(buffer.data(), s1.data(), s1.size());
+  buffer.SetLength(s1.size());
+  cord.Prepend(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  std::string s2(1000, '2');
+  buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size());
+  memcpy(buffer.data(), s2.data(), s2.size());
+  buffer.SetLength(s2.size());
+  cord.Prepend(std::move(buffer));
+  EXPECT_EQ(buffer.length(), 0);    // NOLINT
+  EXPECT_GT(buffer.capacity(), 0);  // NOLINT
+
+  EXPECT_THAT(cord.Chunks(), ElementsAre(s2, s1));
+}
+
+class CordAppendBufferTest : public testing::TestWithParam<bool> {
+ public:
+  size_t is_default() const { return GetParam(); }
+
+  // Returns a human-readable string representation of the test parameter.
+  static std::string ToString(testing::TestParamInfo<bool> param) {
+    return param.param ? "DefaultLimit" : "CustomLimit";
+  }
+
+  size_t limit() const {
+    return is_default() ? absl::CordBuffer::kDefaultLimit
+                        : absl::CordBuffer::kCustomLimit;
+  }
+
+  size_t maximum_payload() const {
+    return is_default() ? absl::CordBuffer::MaximumPayload()
+                        : absl::CordBuffer::MaximumPayload(limit());
+  }
+
+  absl::CordBuffer GetAppendBuffer(absl::Cord& cord, size_t capacity,
+                                   size_t min_capacity = 16) {
+    return is_default()
+               ? cord.GetAppendBuffer(capacity, min_capacity)
+               : cord.GetCustomAppendBuffer(limit(), capacity, min_capacity);
+  }
+};
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordAppendBufferTest, testing::Bool(),
+                         CordAppendBufferTest::ToString);
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnEmptyCord) {
+  absl::Cord cord;
+  absl::CordBuffer buffer = GetAppendBuffer(cord, 1000);
+  EXPECT_GE(buffer.capacity(), 1000);
+  EXPECT_EQ(buffer.length(), 0);
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnInlinedCord) {
+  static constexpr int kInlinedSize = sizeof(absl::CordBuffer) - 1;
+  for (int size : {6, kInlinedSize - 3, kInlinedSize - 2, 1000}) {
+    absl::Cord cord("Abc");
+    absl::CordBuffer buffer = GetAppendBuffer(cord, size, 1);
+    EXPECT_GE(buffer.capacity(), 3 + size);
+    EXPECT_EQ(buffer.length(), 3);
+    EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
+    EXPECT_TRUE(cord.empty());
+  }
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnInlinedCordCapacityCloseToMax) {
+  // Cover the use case where we have a non-empty inlined cord of size 'n' and
+  // ask for something like 'uint64_max - k'. Internal logic could overflow on
+  // 'uint64_max - k + n', so verify that we still get a buffer of the maximum
+  // allowed size rather than a valid but inefficiently smaller one.
+  for (size_t dist_from_max = 0; dist_from_max <= 4; ++dist_from_max) {
+    absl::Cord cord("Abc");
+    size_t size = std::numeric_limits<size_t>::max() - dist_from_max;
+    absl::CordBuffer buffer = GetAppendBuffer(cord, size, 1);
+    EXPECT_GE(buffer.capacity(), maximum_payload());
+    EXPECT_EQ(buffer.length(), 3);
+    EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
+    EXPECT_TRUE(cord.empty());
+  }
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnFlat) {
+  // Create a cord with a single flat and extra capacity
+  absl::Cord cord;
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+  const size_t expected_capacity = buffer.capacity();
+  buffer.SetLength(3);
+  memcpy(buffer.data(), "Abc", 3);
+  cord.Append(std::move(buffer));
+
+  buffer = GetAppendBuffer(cord, 6);
+  EXPECT_EQ(buffer.capacity(), expected_capacity);
+  EXPECT_EQ(buffer.length(), 3);
+  EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
+  EXPECT_TRUE(cord.empty());
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnFlatWithoutMinCapacity) {
+  // Create a cord with a single flat and extra capacity
+  absl::Cord cord;
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+  buffer.SetLength(30);
+  memset(buffer.data(), 'x', 30);
+  cord.Append(std::move(buffer));
+
+  buffer = GetAppendBuffer(cord, 1000, 900);
+  EXPECT_GE(buffer.capacity(), 1000);
+  EXPECT_EQ(buffer.length(), 0);
+  EXPECT_EQ(cord, std::string(30, 'x'));
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnTree) {
+  RandomEngine rng;
+  for (int num_flats : {2, 3, 100}) {
+    // Create a cord with `num_flats` flats and extra capacity
+    absl::Cord cord;
+    std::string prefix;
+    std::string last;
+    for (int i = 0; i < num_flats - 1; ++i) {
+      prefix += last;
+      last = RandomLowercaseString(&rng, 10);
+      absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+      buffer.SetLength(10);
+      memcpy(buffer.data(), last.data(), 10);
+      cord.Append(std::move(buffer));
+    }
+    absl::CordBuffer buffer = GetAppendBuffer(cord, 6);
+    EXPECT_GE(buffer.capacity(), 500);
+    EXPECT_EQ(buffer.length(), 10);
+    EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), last);
+    EXPECT_EQ(cord, prefix);
+  }
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnTreeWithoutMinCapacity) {
+  absl::Cord cord;
+  for (int i = 0; i < 2; ++i) {
+    absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+    buffer.SetLength(3);
+    memcpy(buffer.data(), i ? "def" : "Abc", 3);
+    cord.Append(std::move(buffer));
+  }
+  absl::CordBuffer buffer = GetAppendBuffer(cord, 1000, 900);
+  EXPECT_GE(buffer.capacity(), 1000);
+  EXPECT_EQ(buffer.length(), 0);
+  EXPECT_EQ(cord, "Abcdef");
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnSubstring) {
+  // Create a large cord with a single flat and some extra capacity
+  absl::Cord cord;
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+  buffer.SetLength(450);
+  memset(buffer.data(), 'x', 450);
+  cord.Append(std::move(buffer));
+  cord.RemovePrefix(1);
+
+  // Deny on substring
+  buffer = GetAppendBuffer(cord, 6);
+  EXPECT_EQ(buffer.length(), 0);
+  EXPECT_EQ(cord, std::string(449, 'x'));
+}
+
+TEST_P(CordAppendBufferTest, GetAppendBufferOnSharedCord) {
+  // Create a shared cord with a single flat and extra capacity
+  absl::Cord cord;
+  absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+  buffer.SetLength(3);
+  memcpy(buffer.data(), "Abc", 3);
+  cord.Append(std::move(buffer));
+  absl::Cord shared_cord = cord;
+
+  // Deny on flat
+  buffer = GetAppendBuffer(cord, 6);
+  EXPECT_EQ(buffer.length(), 0);
+  EXPECT_EQ(cord, "Abc");
+
+  buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+  buffer.SetLength(3);
+  memcpy(buffer.data(), "def", 3);
+  cord.Append(std::move(buffer));
+  shared_cord = cord;
+
+  // Deny on tree
+  buffer = GetAppendBuffer(cord, 6);
+  EXPECT_EQ(buffer.length(), 0);
+  EXPECT_EQ(cord, "Abcdef");
+}
+
+TEST_P(CordTest, TryFlatEmpty) {
   absl::Cord c;
   EXPECT_EQ(c.TryFlat(), "");
 }
 
-TEST(TryFlat, Flat) {
+TEST_P(CordTest, TryFlatFlat) {
   absl::Cord c("hello");
+  MaybeHarden(c);
   EXPECT_EQ(c.TryFlat(), "hello");
 }
 
-TEST(TryFlat, SubstrInlined) {
+TEST_P(CordTest, TryFlatSubstrInlined) {
   absl::Cord c("hello");
   c.RemovePrefix(1);
+  MaybeHarden(c);
   EXPECT_EQ(c.TryFlat(), "ello");
 }
 
-TEST(TryFlat, SubstrFlat) {
+TEST_P(CordTest, TryFlatSubstrFlat) {
   absl::Cord c("longer than 15 bytes");
-  c.RemovePrefix(1);
-  EXPECT_EQ(c.TryFlat(), "onger than 15 bytes");
+  absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
+  MaybeHarden(sub);
+  EXPECT_EQ(sub.TryFlat(), "onger than 15 bytes");
 }
 
-TEST(TryFlat, Concat) {
+TEST_P(CordTest, TryFlatConcat) {
   absl::Cord c = absl::MakeFragmentedCord({"hel", "lo"});
+  MaybeHarden(c);
   EXPECT_EQ(c.TryFlat(), absl::nullopt);
 }
 
-TEST(TryFlat, External) {
+TEST_P(CordTest, TryFlatExternal) {
   absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {});
+  MaybeHarden(c);
   EXPECT_EQ(c.TryFlat(), "hell");
 }
 
-TEST(TryFlat, SubstrExternal) {
+TEST_P(CordTest, TryFlatSubstrExternal) {
   absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {});
-  c.RemovePrefix(1);
-  EXPECT_EQ(c.TryFlat(), "ell");
+  absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
+  MaybeHarden(sub);
+  EXPECT_EQ(sub.TryFlat(), "ell");
 }
 
-TEST(TryFlat, SubstrConcat) {
-  absl::Cord c = absl::MakeFragmentedCord({"hello", " world"});
-  c.RemovePrefix(1);
-  EXPECT_EQ(c.TryFlat(), absl::nullopt);
+TEST_P(CordTest, TryFlatCommonlyAssumedInvariants) {
+  // The behavior tested below is not part of the API contract of Cord, but it's
+  // something we intend to be true in our current implementation.  This test
+  // exists to detect and prevent accidental breakage of the implementation.
+  absl::string_view fragments[] = {"A fragmented test",
+                                   " cord",
+                                   " to test subcords",
+                                   " of ",
+                                   "a",
+                                   " cord for",
+                                   " each chunk "
+                                   "returned by the ",
+                                   "iterator"};
+  absl::Cord c = absl::MakeFragmentedCord(fragments);
+  MaybeHarden(c);
+  int fragment = 0;
+  int offset = 0;
+  absl::Cord::CharIterator itc = c.char_begin();
+  for (absl::string_view sv : c.Chunks()) {
+    absl::string_view expected = fragments[fragment];
+    absl::Cord subcord1 = c.Subcord(offset, sv.length());
+    absl::Cord subcord2 = absl::Cord::AdvanceAndRead(&itc, sv.size());
+    EXPECT_EQ(subcord1.TryFlat(), expected);
+    EXPECT_EQ(subcord2.TryFlat(), expected);
+    ++fragment;
+    offset += sv.length();
+  }
 }
 
 static bool IsFlat(const absl::Cord& c) {
@@ -500,15 +1008,17 @@
   EXPECT_TRUE(IsFlat(c));
 }
 
-TEST(Cord, Flatten) {
+TEST_P(CordTest, Flatten) {
   VerifyFlatten(absl::Cord());
-  VerifyFlatten(absl::Cord("small cord"));
-  VerifyFlatten(absl::Cord("larger than small buffer optimization"));
-  VerifyFlatten(absl::MakeFragmentedCord({"small ", "fragmented ", "cord"}));
+  VerifyFlatten(MaybeHardened(absl::Cord("small cord")));
+  VerifyFlatten(
+      MaybeHardened(absl::Cord("larger than small buffer optimization")));
+  VerifyFlatten(MaybeHardened(
+      absl::MakeFragmentedCord({"small ", "fragmented ", "cord"})));
 
   // Test with a cord that is longer than the largest flat buffer
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
-  VerifyFlatten(absl::Cord(RandomLowercaseString(&rng, 8192)));
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
+  VerifyFlatten(MaybeHardened(absl::Cord(RandomLowercaseString(&rng, 8192))));
 }
 
 // Test data
@@ -554,7 +1064,7 @@
 };
 }  // namespace
 
-TEST(Cord, MultipleLengths) {
+TEST_P(CordTest, MultipleLengths) {
   TestData d;
   for (size_t i = 0; i < d.size(); i++) {
     std::string a = d.data(i);
@@ -562,22 +1072,26 @@
     {  // Construct from Cord
       absl::Cord tmp(a);
       absl::Cord x(tmp);
+      MaybeHarden(x);
       EXPECT_EQ(a, std::string(x)) << "'" << a << "'";
     }
 
     {  // Construct from absl::string_view
       absl::Cord x(a);
+      MaybeHarden(x);
       EXPECT_EQ(a, std::string(x)) << "'" << a << "'";
     }
 
     {  // Append cord to self
       absl::Cord self(a);
+      MaybeHarden(self);
       self.Append(self);
       EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'";
     }
 
     {  // Prepend cord to self
       absl::Cord self(a);
+      MaybeHarden(self);
       self.Prepend(self);
       EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'";
     }
@@ -589,12 +1103,14 @@
       {  // CopyFrom Cord
         absl::Cord x(a);
         absl::Cord y(b);
+        MaybeHarden(x);
         x = y;
         EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'";
       }
 
       {  // CopyFrom absl::string_view
         absl::Cord x(a);
+        MaybeHarden(x);
         x = b;
         EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'";
       }
@@ -602,12 +1118,14 @@
       {  // Cord::Append(Cord)
         absl::Cord x(a);
         absl::Cord y(b);
+        MaybeHarden(x);
         x.Append(y);
         EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'";
       }
 
       {  // Cord::Append(absl::string_view)
         absl::Cord x(a);
+        MaybeHarden(x);
         x.Append(b);
         EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'";
       }
@@ -615,12 +1133,14 @@
       {  // Cord::Prepend(Cord)
         absl::Cord x(a);
         absl::Cord y(b);
+        MaybeHarden(x);
         x.Prepend(y);
         EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'";
       }
 
       {  // Cord::Prepend(absl::string_view)
         absl::Cord x(a);
+        MaybeHarden(x);
         x.Prepend(b);
         EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'";
       }
@@ -630,25 +1150,29 @@
 
 namespace {
 
-TEST(Cord, RemoveSuffixWithExternalOrSubstring) {
+TEST_P(CordTest, RemoveSuffixWithExternalOrSubstring) {
   absl::Cord cord = absl::MakeCordFromExternal(
       "foo bar baz", [](absl::string_view s) { DoNothing(s, nullptr); });
-
   EXPECT_EQ("foo bar baz", std::string(cord));
 
+  MaybeHarden(cord);
+
   // This RemoveSuffix() will wrap the EXTERNAL node in a SUBSTRING node.
   cord.RemoveSuffix(4);
   EXPECT_EQ("foo bar", std::string(cord));
 
+  MaybeHarden(cord);
+
   // This RemoveSuffix() will adjust the SUBSTRING node in-place.
   cord.RemoveSuffix(4);
   EXPECT_EQ("foo", std::string(cord));
 }
 
-TEST(Cord, RemoveSuffixMakesZeroLengthNode) {
+TEST_P(CordTest, RemoveSuffixMakesZeroLengthNode) {
   absl::Cord c;
   c.Append(absl::Cord(std::string(100, 'x')));
   absl::Cord other_ref = c;  // Prevent inplace appends
+  MaybeHarden(c);
   c.Append(absl::Cord(std::string(200, 'y')));
   c.RemoveSuffix(200);
   EXPECT_EQ(std::string(100, 'x'), std::string(c));
@@ -672,24 +1196,27 @@
 }
 
 // Establish that ZedBlock does what we think it does.
-TEST(CordSpliceTest, ZedBlock) {
+TEST_P(CordTest, CordSpliceTestZedBlock) {
   absl::Cord blob = CordWithZedBlock(10);
+  MaybeHarden(blob);
   EXPECT_EQ(10, blob.size());
   std::string s;
   absl::CopyCordToString(blob, &s);
   EXPECT_EQ("zzzzzzzzzz", s);
 }
 
-TEST(CordSpliceTest, ZedBlock0) {
+TEST_P(CordTest, CordSpliceTestZedBlock0) {
   absl::Cord blob = CordWithZedBlock(0);
+  MaybeHarden(blob);
   EXPECT_EQ(0, blob.size());
   std::string s;
   absl::CopyCordToString(blob, &s);
   EXPECT_EQ("", s);
 }
 
-TEST(CordSpliceTest, ZedBlockSuffix1) {
+TEST_P(CordTest, CordSpliceTestZedBlockSuffix1) {
   absl::Cord blob = CordWithZedBlock(10);
+  MaybeHarden(blob);
   EXPECT_EQ(10, blob.size());
   absl::Cord suffix(blob);
   suffix.RemovePrefix(9);
@@ -700,8 +1227,9 @@
 }
 
 // Remove all of a prefix block
-TEST(CordSpliceTest, ZedBlockSuffix0) {
+TEST_P(CordTest, CordSpliceTestZedBlockSuffix0) {
   absl::Cord blob = CordWithZedBlock(10);
+  MaybeHarden(blob);
   EXPECT_EQ(10, blob.size());
   absl::Cord suffix(blob);
   suffix.RemovePrefix(10);
@@ -719,29 +1247,31 @@
 // Splice block into cord.
 absl::Cord SpliceCord(const absl::Cord& blob, int64_t offset,
                       const absl::Cord& block) {
-  ABSL_RAW_CHECK(offset >= 0, "");
-  ABSL_RAW_CHECK(offset + block.size() <= blob.size(), "");
+  CHECK_GE(offset, 0);
+  CHECK_LE(static_cast<size_t>(offset) + block.size(), blob.size());
   absl::Cord result(blob);
   result.RemoveSuffix(blob.size() - offset);
   result.Append(block);
   absl::Cord suffix(blob);
   suffix.RemovePrefix(offset + block.size());
   result.Append(suffix);
-  ABSL_RAW_CHECK(blob.size() == result.size(), "");
+  CHECK_EQ(blob.size(), result.size());
   return result;
 }
 
 // Taking an empty suffix of a block breaks appending.
-TEST(CordSpliceTest, RemoveEntireBlock1) {
+TEST_P(CordTest, CordSpliceTestRemoveEntireBlock1) {
   absl::Cord zero = CordWithZedBlock(10);
+  MaybeHarden(zero);
   absl::Cord suffix(zero);
   suffix.RemovePrefix(10);
   absl::Cord result;
   result.Append(suffix);
 }
 
-TEST(CordSpliceTest, RemoveEntireBlock2) {
+TEST_P(CordTest, CordSpliceTestRemoveEntireBlock2) {
   absl::Cord zero = CordWithZedBlock(10);
+  MaybeHarden(zero);
   absl::Cord prefix(zero);
   prefix.RemoveSuffix(10);
   absl::Cord suffix(zero);
@@ -750,16 +1280,22 @@
   result.Append(suffix);
 }
 
-TEST(CordSpliceTest, RemoveEntireBlock3) {
+TEST_P(CordTest, CordSpliceTestRemoveEntireBlock3) {
   absl::Cord blob = CordWithZedBlock(10);
   absl::Cord block = BigCord(10, 'b');
+  MaybeHarden(blob);
+  MaybeHarden(block);
   blob = SpliceCord(blob, 0, block);
 }
 
 struct CordCompareTestCase {
   template <typename LHS, typename RHS>
-  CordCompareTestCase(const LHS& lhs, const RHS& rhs)
-      : lhs_cord(lhs), rhs_cord(rhs) {}
+  CordCompareTestCase(const LHS& lhs, const RHS& rhs, bool use_crc)
+      : lhs_cord(lhs), rhs_cord(rhs) {
+    if (use_crc) {
+      lhs_cord.SetExpectedChecksum(1);
+    }
+  }
 
   absl::Cord lhs_cord;
   absl::Cord rhs_cord;
@@ -781,7 +1317,7 @@
       << "LHS=" << rhs_string << "; RHS=" << lhs_string;
 }
 
-TEST(Cord, Compare) {
+TEST_P(CordTest, Compare) {
   absl::Cord subcord("aaaaaBBBBBcccccDDDDD");
   subcord = subcord.Subcord(3, 10);
 
@@ -796,47 +1332,54 @@
   concat2.Append("cccccccccccDDDDDDDDDDDDDD");
   concat2.Append("DD");
 
+  const bool use_crc = UseCrc();
+
   std::vector<CordCompareTestCase> test_cases = {{
       // Inline cords
-      {"abcdef", "abcdef"},
-      {"abcdef", "abcdee"},
-      {"abcdef", "abcdeg"},
-      {"bbcdef", "abcdef"},
-      {"bbcdef", "abcdeg"},
-      {"abcdefa", "abcdef"},
-      {"abcdef", "abcdefa"},
+      {"abcdef", "abcdef", use_crc},
+      {"abcdef", "abcdee", use_crc},
+      {"abcdef", "abcdeg", use_crc},
+      {"bbcdef", "abcdef", use_crc},
+      {"bbcdef", "abcdeg", use_crc},
+      {"abcdefa", "abcdef", use_crc},
+      {"abcdef", "abcdefa", use_crc},
 
       // Small flat cords
-      {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD"},
-      {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD"},
-      {"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD"},
-      {"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX"},
-      {"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD"},
-      {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa"},
+      {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc},
+      {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD", use_crc},
+      {"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc},
+      {"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX", use_crc},
+      {"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD", use_crc},
+      {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa", use_crc},
 
       // Subcords
-      {subcord, subcord},
-      {subcord, "aaBBBBBccc"},
-      {subcord, "aaBBBBBccd"},
-      {subcord, "aaBBBBBccb"},
-      {subcord, "aaBBBBBxcb"},
-      {subcord, "aaBBBBBccca"},
-      {subcord, "aaBBBBBcc"},
+      {subcord, subcord, use_crc},
+      {subcord, "aaBBBBBccc", use_crc},
+      {subcord, "aaBBBBBccd", use_crc},
+      {subcord, "aaBBBBBccb", use_crc},
+      {subcord, "aaBBBBBxcb", use_crc},
+      {subcord, "aaBBBBBccca", use_crc},
+      {subcord, "aaBBBBBcc", use_crc},
 
       // Concats
-      {concat, concat},
+      {concat, concat, use_crc},
       {concat,
-       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD"},
+       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD",
+       use_crc},
       {concat,
-       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD"},
+       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD",
+       use_crc},
       {concat,
-       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD"},
+       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD",
+       use_crc},
       {concat,
-       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD"},
+       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD",
+       use_crc},
       {concat,
-       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe"},
+       "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe",
+       use_crc},
 
-      {concat, concat2},
+      {concat, concat2, use_crc},
   }};
 
   for (const auto& tc : test_cases) {
@@ -844,9 +1387,10 @@
   }
 }
 
-TEST(Cord, CompareAfterAssign) {
+TEST_P(CordTest, CompareAfterAssign) {
   absl::Cord a("aaaaaa1111111");
   absl::Cord b("aaaaaa2222222");
+  MaybeHarden(a);
   a = "cccccc";
   b = "cccccc";
   EXPECT_EQ(a, b);
@@ -873,8 +1417,8 @@
   EXPECT_EQ(expected, sign(c.Compare(d))) << c << ", " << d;
 }
 
-TEST(Compare, ComparisonIsUnsigned) {
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+TEST_P(CordTest, CompareComparisonIsUnsigned) {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
   std::uniform_int_distribution<uint32_t> uniform_uint8(0, 255);
   char x = static_cast<char>(uniform_uint8(rng));
   TestCompare(
@@ -882,9 +1426,9 @@
       absl::Cord(std::string(GetUniformRandomUpTo(&rng, 100), x ^ 0x80)), &rng);
 }
 
-TEST(Compare, RandomComparisons) {
+TEST_P(CordTest, CompareRandomComparisons) {
   const int kIters = 5000;
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
 
   int n = GetUniformRandomUpTo(&rng, 5000);
   absl::Cord a[] = {MakeExternalCord(n),
@@ -905,6 +1449,8 @@
       d.Append(a[GetUniformRandomUpTo(&rng, ABSL_ARRAYSIZE(a))]);
     }
     std::bernoulli_distribution coin_flip(0.5);
+    MaybeHarden(c);
+    MaybeHarden(d);
     TestCompare(coin_flip(rng) ? c : absl::Cord(std::string(c)),
                 coin_flip(rng) ? d : absl::Cord(std::string(d)), &rng);
   }
@@ -940,43 +1486,43 @@
   EXPECT_FALSE(b <= a);
 }
 
-TEST(ComparisonOperators, Cord_Cord) {
+TEST_P(CordTest, ComparisonOperators_Cord_Cord) {
   CompareOperators<absl::Cord, absl::Cord>();
 }
 
-TEST(ComparisonOperators, Cord_StringPiece) {
+TEST_P(CordTest, ComparisonOperators_Cord_StringPiece) {
   CompareOperators<absl::Cord, absl::string_view>();
 }
 
-TEST(ComparisonOperators, StringPiece_Cord) {
+TEST_P(CordTest, ComparisonOperators_StringPiece_Cord) {
   CompareOperators<absl::string_view, absl::Cord>();
 }
 
-TEST(ComparisonOperators, Cord_string) {
+TEST_P(CordTest, ComparisonOperators_Cord_string) {
   CompareOperators<absl::Cord, std::string>();
 }
 
-TEST(ComparisonOperators, string_Cord) {
+TEST_P(CordTest, ComparisonOperators_string_Cord) {
   CompareOperators<std::string, absl::Cord>();
 }
 
-TEST(ComparisonOperators, stdstring_Cord) {
+TEST_P(CordTest, ComparisonOperators_stdstring_Cord) {
   CompareOperators<std::string, absl::Cord>();
 }
 
-TEST(ComparisonOperators, Cord_stdstring) {
+TEST_P(CordTest, ComparisonOperators_Cord_stdstring) {
   CompareOperators<absl::Cord, std::string>();
 }
 
-TEST(ComparisonOperators, charstar_Cord) {
+TEST_P(CordTest, ComparisonOperators_charstar_Cord) {
   CompareOperators<const char*, absl::Cord>();
 }
 
-TEST(ComparisonOperators, Cord_charstar) {
+TEST_P(CordTest, ComparisonOperators_Cord_charstar) {
   CompareOperators<absl::Cord, const char*>();
 }
 
-TEST(ConstructFromExternal, ReleaserInvoked) {
+TEST_P(CordTest, ConstructFromExternalReleaserInvoked) {
   // Empty external memory means the releaser should be called immediately.
   {
     bool invoked = false;
@@ -1018,8 +1564,8 @@
   }
 }
 
-TEST(ConstructFromExternal, CompareContents) {
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+TEST_P(CordTest, ConstructFromExternalCompareContents) {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
 
   for (int length = 1; length <= 2048; length *= 2) {
     std::string data = RandomLowercaseString(&rng, length);
@@ -1030,12 +1576,13 @@
           EXPECT_EQ(external->size(), sv.size());
           delete external;
         });
+    MaybeHarden(cord);
     EXPECT_EQ(data, cord);
   }
 }
 
-TEST(ConstructFromExternal, LargeReleaser) {
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+TEST_P(CordTest, ConstructFromExternalLargeReleaser) {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
   constexpr size_t kLength = 256;
   std::string data = RandomLowercaseString(&rng, kLength);
   std::array<char, kLength> data_array;
@@ -1045,11 +1592,11 @@
     EXPECT_EQ(data, absl::string_view(data_array.data(), data_array.size()));
     invoked = true;
   };
-  (void)absl::MakeCordFromExternal(data, releaser);
+  (void)MaybeHardened(absl::MakeCordFromExternal(data, releaser));
   EXPECT_TRUE(invoked);
 }
 
-TEST(ConstructFromExternal, FunctionPointerReleaser) {
+TEST_P(CordTest, ConstructFromExternalFunctionPointerReleaser) {
   static absl::string_view data("hello world");
   static bool invoked;
   auto* releaser =
@@ -1058,15 +1605,15 @@
         invoked = true;
       });
   invoked = false;
-  (void)absl::MakeCordFromExternal(data, releaser);
+  (void)MaybeHardened(absl::MakeCordFromExternal(data, releaser));
   EXPECT_TRUE(invoked);
 
   invoked = false;
-  (void)absl::MakeCordFromExternal(data, *releaser);
+  (void)MaybeHardened(absl::MakeCordFromExternal(data, *releaser));
   EXPECT_TRUE(invoked);
 }
 
-TEST(ConstructFromExternal, MoveOnlyReleaser) {
+TEST_P(CordTest, ConstructFromExternalMoveOnlyReleaser) {
   struct Releaser {
     explicit Releaser(bool* invoked) : invoked(invoked) {}
     Releaser(Releaser&& other) noexcept : invoked(other.invoked) {}
@@ -1076,24 +1623,25 @@
   };
 
   bool invoked = false;
-  (void)absl::MakeCordFromExternal("dummy", Releaser(&invoked));
+  (void)MaybeHardened(absl::MakeCordFromExternal("dummy", Releaser(&invoked)));
   EXPECT_TRUE(invoked);
 }
 
-TEST(ConstructFromExternal, NoArgLambda) {
+TEST_P(CordTest, ConstructFromExternalNoArgLambda) {
   bool invoked = false;
-  (void)absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; });
+  (void)MaybeHardened(
+      absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; }));
   EXPECT_TRUE(invoked);
 }
 
-TEST(ConstructFromExternal, StringViewArgLambda) {
+TEST_P(CordTest, ConstructFromExternalStringViewArgLambda) {
   bool invoked = false;
-  (void)absl::MakeCordFromExternal(
-      "dummy", [&invoked](absl::string_view) { invoked = true; });
+  (void)MaybeHardened(absl::MakeCordFromExternal(
+      "dummy", [&invoked](absl::string_view) { invoked = true; }));
   EXPECT_TRUE(invoked);
 }
 
-TEST(ConstructFromExternal, NonTrivialReleaserDestructor) {
+TEST_P(CordTest, ConstructFromExternalNonTrivialReleaserDestructor) {
   struct Releaser {
     explicit Releaser(bool* destroyed) : destroyed(destroyed) {}
     ~Releaser() { *destroyed = true; }
@@ -1104,57 +1652,94 @@
 
   bool destroyed = false;
   Releaser releaser(&destroyed);
-  (void)absl::MakeCordFromExternal("dummy", releaser);
+  (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser));
   EXPECT_TRUE(destroyed);
 }
 
-TEST(ConstructFromExternal, ReferenceQualifierOverloads) {
-  struct Releaser {
-    void operator()(absl::string_view) & { *lvalue_invoked = true; }
-    void operator()(absl::string_view) && { *rvalue_invoked = true; }
+TEST_P(CordTest, ConstructFromExternalReferenceQualifierOverloads) {
+  enum InvokedAs { kMissing, kLValue, kRValue };
+  enum CopiedAs { kNone, kMove, kCopy };
+  struct Tracker {
+    CopiedAs copied_as = kNone;
+    InvokedAs invoked_as = kMissing;
 
-    bool* lvalue_invoked;
-    bool* rvalue_invoked;
+    void Record(InvokedAs rhs) {
+      ASSERT_EQ(invoked_as, kMissing);
+      invoked_as = rhs;
+    }
+
+    void Record(CopiedAs rhs) {
+      if (copied_as == kNone || rhs == kCopy) copied_as = rhs;
+    }
+  } tracker;
+
+  class Releaser {
+   public:
+    explicit Releaser(Tracker* tracker) : tr_(tracker) { *tracker = Tracker(); }
+    Releaser(Releaser&& rhs) : tr_(rhs.tr_) { tr_->Record(kMove); }
+    Releaser(const Releaser& rhs) : tr_(rhs.tr_) { tr_->Record(kCopy); }
+
+    void operator()(absl::string_view) & { tr_->Record(kLValue); }
+    void operator()(absl::string_view) && { tr_->Record(kRValue); }
+
+   private:
+    Tracker* tr_;
   };
 
-  bool lvalue_invoked = false;
-  bool rvalue_invoked = false;
-  Releaser releaser = {&lvalue_invoked, &rvalue_invoked};
-  (void)absl::MakeCordFromExternal("", releaser);
-  EXPECT_FALSE(lvalue_invoked);
-  EXPECT_TRUE(rvalue_invoked);
-  rvalue_invoked = false;
+  const Releaser releaser1(&tracker);
+  (void)MaybeHardened(absl::MakeCordFromExternal("", releaser1));
+  EXPECT_EQ(tracker.copied_as, kCopy);
+  EXPECT_EQ(tracker.invoked_as, kRValue);
 
-  (void)absl::MakeCordFromExternal("dummy", releaser);
-  EXPECT_FALSE(lvalue_invoked);
-  EXPECT_TRUE(rvalue_invoked);
-  rvalue_invoked = false;
+  const Releaser releaser2(&tracker);
+  (void)MaybeHardened(absl::MakeCordFromExternal("", releaser2));
+  EXPECT_EQ(tracker.copied_as, kCopy);
+  EXPECT_EQ(tracker.invoked_as, kRValue);
 
-  // NOLINTNEXTLINE: suppress clang-tidy std::move on trivially copyable type.
-  (void)absl::MakeCordFromExternal("dummy", std::move(releaser));
-  EXPECT_FALSE(lvalue_invoked);
-  EXPECT_TRUE(rvalue_invoked);
+  Releaser releaser3(&tracker);
+  (void)MaybeHardened(absl::MakeCordFromExternal("", std::move(releaser3)));
+  EXPECT_EQ(tracker.copied_as, kMove);
+  EXPECT_EQ(tracker.invoked_as, kRValue);
+
+  Releaser releaser4(&tracker);
+  (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser4));
+  EXPECT_EQ(tracker.copied_as, kCopy);
+  EXPECT_EQ(tracker.invoked_as, kRValue);
+
+  const Releaser releaser5(&tracker);
+  (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser5));
+  EXPECT_EQ(tracker.copied_as, kCopy);
+  EXPECT_EQ(tracker.invoked_as, kRValue);
+
+  Releaser releaser6(&tracker);
+  (void)MaybeHardened(absl::MakeCordFromExternal("foo", std::move(releaser6)));
+  EXPECT_EQ(tracker.copied_as, kMove);
+  EXPECT_EQ(tracker.invoked_as, kRValue);
 }
 
-TEST(ExternalMemory, BasicUsage) {
+TEST_P(CordTest, ExternalMemoryBasicUsage) {
   static const char* strings[] = {"", "hello", "there"};
   for (const char* str : strings) {
     absl::Cord dst("(prefix)");
+    MaybeHarden(dst);
     AddExternalMemory(str, &dst);
+    MaybeHarden(dst);
     dst.Append("(suffix)");
     EXPECT_EQ((std::string("(prefix)") + str + std::string("(suffix)")),
               std::string(dst));
   }
 }
 
-TEST(ExternalMemory, RemovePrefixSuffix) {
+TEST_P(CordTest, ExternalMemoryRemovePrefixSuffix) {
   // Exhaustively try all sub-strings.
   absl::Cord cord = MakeComposite();
   std::string s = std::string(cord);
   for (int offset = 0; offset <= s.size(); offset++) {
     for (int length = 0; length <= s.size() - offset; length++) {
       absl::Cord result(cord);
+      MaybeHarden(result);
       result.RemovePrefix(offset);
+      MaybeHarden(result);
       result.RemoveSuffix(result.size() - length);
       EXPECT_EQ(s.substr(offset, length), std::string(result))
           << offset << " " << length;
@@ -1162,11 +1747,13 @@
   }
 }
 
-TEST(ExternalMemory, Get) {
+TEST_P(CordTest, ExternalMemoryGet) {
   absl::Cord cord("hello");
   AddExternalMemory(" world!", &cord);
+  MaybeHarden(cord);
   AddExternalMemory(" how are ", &cord);
   cord.Append(" you?");
+  MaybeHarden(cord);
   std::string s = std::string(cord);
   for (int i = 0; i < s.size(); i++) {
     EXPECT_EQ(s[i], cord[i]);
@@ -1174,58 +1761,157 @@
 }
 
 // CordMemoryUsage tests verify the correctness of the EstimatedMemoryUsage()
-// These tests take into account that the reported memory usage is approximate
-// and non-deterministic. For all tests, We verify that the reported memory
-// usage is larger than `size()`, and less than `size() * 1.5` as a cord should
-// never reserve more 'extra' capacity than half of its size as it grows.
-// Additionally we have some whiteboxed expectations based on our knowledge of
-// the layout and size of empty and inlined cords, and flat nodes.
+// We use whiteboxed expectations based on our knowledge of the layout and size
+// of empty and inlined cords, and flat nodes.
 
-TEST(CordMemoryUsage, Empty) {
-  EXPECT_EQ(sizeof(absl::Cord), absl::Cord().EstimatedMemoryUsage());
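+// Shorthand for the two explicit accounting modes used below: kFairShare
+// attributes shared nodes proportionally to their reference count, while
+// kTotalMorePrecise counts each node only once even when this cord references
+// it multiple times.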
+constexpr auto kFairShare = absl::CordMemoryAccounting::kFairShare;
+constexpr auto kTotalMorePrecise =
+    absl::CordMemoryAccounting::kTotalMorePrecise;
+
+// Creates a cord of `n` `c` values, making sure no string stealing occurs.
+absl::Cord MakeCord(size_t n, char c) {
+  const std::string s(n, c);
+  return absl::Cord(s);
 }
 
-TEST(CordMemoryUsage, Embedded) {
+TEST(CordTest, CordMemoryUsageEmpty) {
+  absl::Cord cord;
+  EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage());
+  EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kFairShare));
+  EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kTotalMorePrecise));
+}
+
+TEST(CordTest, CordMemoryUsageInlined) {
   absl::Cord a("hello");
   EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord));
+  EXPECT_EQ(a.EstimatedMemoryUsage(kFairShare), sizeof(absl::Cord));
+  EXPECT_EQ(a.EstimatedMemoryUsage(kTotalMorePrecise), sizeof(absl::Cord));
 }
 
-TEST(CordMemoryUsage, EmbeddedAppend) {
-  absl::Cord a("a");
-  absl::Cord b("bcd");
-  EXPECT_EQ(b.EstimatedMemoryUsage(), sizeof(absl::Cord));
-  a.Append(b);
-  EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord));
-}
-
-TEST(CordMemoryUsage, ExternalMemory) {
-  static const int kLength = 1000;
+TEST(CordTest, CordMemoryUsageExternalMemory) {
   absl::Cord cord;
-  AddExternalMemory(std::string(kLength, 'x'), &cord);
-  EXPECT_GT(cord.EstimatedMemoryUsage(), kLength);
-  EXPECT_LE(cord.EstimatedMemoryUsage(), kLength * 1.5);
+  AddExternalMemory(std::string(1000, 'x'), &cord);
+  const size_t expected =
+      sizeof(absl::Cord) + 1000 + sizeof(CordRepExternal) + sizeof(intptr_t);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(), expected);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), expected);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise), expected);
 }
 
-TEST(CordMemoryUsage, Flat) {
-  static const int kLength = 125;
-  absl::Cord a(std::string(kLength, 'a'));
-  EXPECT_GT(a.EstimatedMemoryUsage(), kLength);
-  EXPECT_LE(a.EstimatedMemoryUsage(), kLength * 1.5);
+TEST(CordTest, CordMemoryUsageFlat) {
+  absl::Cord cord = MakeCord(1000, 'a');
+  const size_t flat_size =
+      absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
+  EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + flat_size);
 }
 
-TEST(CordMemoryUsage, AppendFlat) {
-  using absl::strings_internal::CordTestAccess;
-  absl::Cord a(std::string(CordTestAccess::MaxFlatLength(), 'a'));
-  size_t length = a.EstimatedMemoryUsage();
-  a.Append(std::string(CordTestAccess::MaxFlatLength(), 'b'));
-  size_t delta = a.EstimatedMemoryUsage() - length;
-  EXPECT_GT(delta, CordTestAccess::MaxFlatLength());
-  EXPECT_LE(delta, CordTestAccess::MaxFlatLength() * 1.5);
+TEST(CordTest, CordMemoryUsageSubStringSharedFlat) {
+  absl::Cord flat = MakeCord(2000, 'a');
+  const size_t flat_size =
+      absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
+  absl::Cord cord = flat.Subcord(500, 1000);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(),
+            sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size / 2);
+}
+
+TEST(CordTest, CordMemoryUsageFlatShared) {
+  absl::Cord shared = MakeCord(1000, 'a');
+  absl::Cord cord(shared);
+  const size_t flat_size =
+      absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
+  EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + flat_size / 2);
+}
+
+TEST(CordTest, CordMemoryUsageFlatHardenedAndShared) {
+  absl::Cord shared = MakeCord(1000, 'a');
+  absl::Cord cord(shared);
+  const size_t flat_size =
+      absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
+  cord.SetExpectedChecksum(1);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(),
+            sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size / 2);
+
+  absl::Cord cord2(cord);
+  EXPECT_EQ(cord2.EstimatedMemoryUsage(),
+            sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
+  EXPECT_EQ(cord2.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
+  EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + (sizeof(CordRepCrc) + flat_size / 2) / 2);
+}
+
+TEST(CordTest, CordMemoryUsageBTree) {
+  absl::Cord cord1;
+  size_t flats1_size = 0;
+  absl::Cord flats1[4] = {MakeCord(1000, 'a'), MakeCord(1100, 'a'),
+                          MakeCord(1200, 'a'), MakeCord(1300, 'a')};
+  for (absl::Cord flat : flats1) {
+    flats1_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
+    cord1.Append(std::move(flat));
+  }
+
+  // Make sure the created cord is a BTREE. Under some builds, such as a
+  // Windows DLL build, we may see ODR-like effects on the flag, meaning the
+  // DLL code will run with the picked-up default.
+  if (!absl::CordTestPeer::Tree(cord1)->IsBtree()) {
+    LOG(WARNING) << "Cord library code not respecting btree flag";
+    return;
+  }
+
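+  // The flats are still referenced by the `flats1` array, so fair-share
+  // accounting attributes only half of each flat's size to `cord1`.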
+  size_t rep1_size = sizeof(CordRepBtree) + flats1_size;
+  size_t rep1_shared_size = sizeof(CordRepBtree) + flats1_size / 2;
+
+  EXPECT_EQ(cord1.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep1_size);
+  EXPECT_EQ(cord1.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + rep1_size);
+  EXPECT_EQ(cord1.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + rep1_shared_size);
+
+  absl::Cord cord2;
+  size_t flats2_size = 0;
+  absl::Cord flats2[4] = {MakeCord(600, 'a'), MakeCord(700, 'a'),
+                          MakeCord(800, 'a'), MakeCord(900, 'a')};
+  for (absl::Cord& flat : flats2) {
+    flats2_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
+    cord2.Append(std::move(flat));
+  }
+  size_t rep2_size = sizeof(CordRepBtree) + flats2_size;
+
+  EXPECT_EQ(cord2.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep2_size);
+  EXPECT_EQ(cord2.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + rep2_size);
+  EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + rep2_size);
+
+  absl::Cord cord(cord1);
+  cord.Append(std::move(cord2));
+
+  EXPECT_EQ(cord.EstimatedMemoryUsage(),
+            sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
+  EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+            sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_shared_size / 2 +
+                rep2_size);
 }
 
 // Regtest for a change that had to be rolled back because it expanded out
 // of the InlineRep too soon, which was observable through MemoryUsage().
-TEST(CordMemoryUsage, InlineRep) {
+TEST_P(CordTest, CordMemoryUsageInlineRep) {
   constexpr size_t kMaxInline = 15;  // Cord::InlineRep::N
   const std::string small_string(kMaxInline, 'x');
   absl::Cord c1(small_string);
@@ -1236,17 +1922,79 @@
   EXPECT_EQ(c1.EstimatedMemoryUsage(), c2.EstimatedMemoryUsage());
 }
 
+TEST_P(CordTest, CordMemoryUsageTotalMorePreciseMode) {
+  constexpr size_t kChunkSize = 2000;
+  std::string tmp_str(kChunkSize, 'x');
+  const absl::Cord flat(std::move(tmp_str));
+
+  // Construct `fragmented` with two references into the same
+  // underlying buffer shared with `flat`:
+  absl::Cord fragmented(flat);
+  fragmented.Append(flat);
+
+  // Memory usage of `flat`, minus the top-level Cord object:
+  const size_t flat_internal_usage =
+      flat.EstimatedMemoryUsage() - sizeof(absl::Cord);
+
+  // `fragmented` holds a Cord and a CordRepBtree. That tree points to two
+  // copies of flat's internals, which we expect to dedup:
+  EXPECT_EQ(fragmented.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) +
+            sizeof(CordRepBtree) +
+            flat_internal_usage);
+
+  // This is a case where kTotal produces an overestimate:
+  EXPECT_EQ(fragmented.EstimatedMemoryUsage(),
+            sizeof(absl::Cord) +
+            sizeof(CordRepBtree) +
+            2 * flat_internal_usage);
+}
+
+TEST_P(CordTest, CordMemoryUsageTotalMorePreciseModeWithSubstring) {
+  constexpr size_t kChunkSize = 2000;
+  std::string tmp_str(kChunkSize, 'x');
+  const absl::Cord flat(std::move(tmp_str));
+
+  // Construct `fragmented` with two references into the same
+  // underlying buffer shared with `flat`.
+  //
+  // This time, each reference is through a Subcord():
+  absl::Cord fragmented;
+  fragmented.Append(flat.Subcord(1, kChunkSize - 2));
+  fragmented.Append(flat.Subcord(1, kChunkSize - 2));
+
+  // Memory usage of `flat`, minus the top-level Cord object:
+  const size_t flat_internal_usage =
+      flat.EstimatedMemoryUsage() - sizeof(absl::Cord);
+
+  // `fragmented` holds a Cord and a CordRepBtree. That tree points to two
+  // CordRepSubstrings, each pointing at flat's internals.
+  EXPECT_EQ(fragmented.EstimatedMemoryUsage(kTotalMorePrecise),
+            sizeof(absl::Cord) +
+            sizeof(CordRepBtree) +
+            2 * sizeof(CordRepSubstring) +
+            flat_internal_usage);
+
+  // This is a case where kTotal produces an overestimate:
+  EXPECT_EQ(fragmented.EstimatedMemoryUsage(),
+            sizeof(absl::Cord) +
+            sizeof(CordRepBtree) +
+            2 * sizeof(CordRepSubstring) +
+            2 * flat_internal_usage);
+}
 }  // namespace
 
 // Regtest for 7510292 (fix a bug introduced by 7465150)
-TEST(Cord, Concat_Append) {
+TEST_P(CordTest, Concat_Append) {
   // Create a rep of type CONCAT
   absl::Cord s1("foobarbarbarbarbar");
+  MaybeHarden(s1);
   s1.Append("abcdefgabcdefgabcdefgabcdefgabcdefgabcdefgabcdefg");
   size_t size = s1.size();
 
   // Create a copy of s1 and append to it.
   absl::Cord s2 = s1;
+  MaybeHarden(s2);
   s2.Append("x");
 
   // 7465150 modifies s1 when it shouldn't.
@@ -1254,10 +2002,96 @@
   EXPECT_EQ(s2.size(), size + 1);
 }
 
-TEST(MakeFragmentedCord, MakeFragmentedCordFromInitializerList) {
+TEST_P(CordTest, DiabolicalGrowth) {
+  // This test exercises a diabolical Append(<one char>) on a cord, making the
+  // cord shared before each Append call, which results in a terribly
+  // fragmented final cord.
+  // TODO(b/183983616): Apply some minimum compaction when copying a shared
+  // source cord into a mutable copy for updates in CordRepRing.
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
+  const std::string expected = RandomLowercaseString(&rng, 5000);
+  absl::Cord cord;
+  for (char c : expected) {
+    absl::Cord shared(cord);
+    cord.Append(absl::string_view(&c, 1));
+    MaybeHarden(cord);
+  }
+  std::string value;
+  absl::CopyCordToString(cord, &value);
+  EXPECT_EQ(value, expected);
+  LOG(INFO) << "Diabolical size allocated = " << cord.EstimatedMemoryUsage();
+}
+
+// The following tests check support for >4GB cords in 64-bit binaries, and
+// 2GB-4GB cords in 32-bit binaries.
+
+// Construct a huge cord with the specified valid prefix.
+static absl::Cord MakeHuge(absl::string_view prefix) {
+  absl::Cord cord;
+  if (sizeof(size_t) > 4) {
+    // In 64-bit binaries, test 64-bit Cord support.
+    const size_t size =
+        static_cast<size_t>(std::numeric_limits<uint32_t>::max()) + 314;
+    cord.Append(absl::MakeCordFromExternal(
+        absl::string_view(prefix.data(), size),
+        [](absl::string_view s) { DoNothing(s, nullptr); }));
+  } else {
+    // Cords are limited to 32-bit lengths in 32-bit binaries.  The following
+    // tests check for use of "signed int" to represent Cord length/offset.
+    // However, absl::string_view does not allow lengths >= (1u<<31), so we
+    // need to append in two parts.
+    const size_t s1 = (1u << 31) - 1;
+    // For a short cord, `Append` copies the data rather than allocating a new
+    // node. The threshold is currently 511 bytes, so `s2` needs to be larger
+    // than that to avoid triggering the copy.
+    const size_t s2 = 600;
+    cord.Append(absl::MakeCordFromExternal(
+        absl::string_view(prefix.data(), s1),
+        [](absl::string_view s) { DoNothing(s, nullptr); }));
+    cord.Append(absl::MakeCordFromExternal(
+        absl::string_view("", s2),
+        [](absl::string_view s) { DoNothing(s, nullptr); }));
+  }
+  return cord;
+}
+
+TEST_P(CordTest, HugeCord) {
+  absl::Cord cord = MakeHuge("huge cord");
+  MaybeHarden(cord);
+
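+  // Hardening adds a CordRepCrc node on top of the tree, so allow for its
+  // size in the acceptable memory-usage delta.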
+  const size_t acceptable_delta =
+      100 + (UseCrc() ? sizeof(absl::cord_internal::CordRepCrc) : 0);
+  EXPECT_LE(cord.size(), cord.EstimatedMemoryUsage());
+  EXPECT_GE(cord.size() + acceptable_delta, cord.EstimatedMemoryUsage());
+}
+
+// Tests that Append() works ok when handed a self reference
+TEST_P(CordTest, AppendSelf) {
+  // Test the empty case.
+  absl::Cord empty;
+  MaybeHarden(empty);
+  empty.Append(empty);
+  ASSERT_EQ(empty, "");
+
+  // We run the test until data is ~16K
+  // This guarantees it covers small, medium and large data.
+  std::string control_data = "Abc";
+  absl::Cord data(control_data);
+  while (control_data.length() < 0x4000) {
+    MaybeHarden(data);
+    data.Append(data);
+    control_data.append(control_data);
+    ASSERT_EQ(control_data, data);
+  }
+}
+
+TEST_P(CordTest, MakeFragmentedCordFromInitializerList) {
   absl::Cord fragmented =
       absl::MakeFragmentedCord({"A ", "fragmented ", "Cord"});
 
+  MaybeHarden(fragmented);
+
   EXPECT_EQ("A fragmented Cord", fragmented);
 
   auto chunk_it = fragmented.chunk_begin();
@@ -1274,10 +2108,12 @@
   ASSERT_TRUE(++chunk_it == fragmented.chunk_end());
 }
 
-TEST(MakeFragmentedCord, MakeFragmentedCordFromVector) {
+TEST_P(CordTest, MakeFragmentedCordFromVector) {
   std::vector<absl::string_view> chunks = {"A ", "fragmented ", "Cord"};
   absl::Cord fragmented = absl::MakeFragmentedCord(chunks);
 
+  MaybeHarden(fragmented);
+
   EXPECT_EQ("A fragmented Cord", fragmented);
 
   auto chunk_it = fragmented.chunk_begin();
@@ -1294,7 +2130,7 @@
   ASSERT_TRUE(++chunk_it == fragmented.chunk_end());
 }
 
-TEST(CordChunkIterator, Traits) {
+TEST_P(CordTest, CordChunkIteratorTraits) {
   static_assert(std::is_copy_constructible<absl::Cord::ChunkIterator>::value,
                 "");
   static_assert(std::is_copy_assignable<absl::Cord::ChunkIterator>::value, "");
@@ -1375,39 +2211,115 @@
   EXPECT_TRUE(post_iter == cord.chunk_end());  // NOLINT
 }
 
-TEST(CordChunkIterator, Operations) {
+TEST_P(CordTest, CordChunkIteratorOperations) {
   absl::Cord empty_cord;
   VerifyChunkIterator(empty_cord, 0);
 
   absl::Cord small_buffer_cord("small cord");
+  MaybeHarden(small_buffer_cord);
   VerifyChunkIterator(small_buffer_cord, 1);
 
   absl::Cord flat_node_cord("larger than small buffer optimization");
+  MaybeHarden(flat_node_cord);
   VerifyChunkIterator(flat_node_cord, 1);
 
-  VerifyChunkIterator(
-      absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ",
-                                "testing ", "chunk ", "iterations."}),
-      8);
+  VerifyChunkIterator(MaybeHardened(absl::MakeFragmentedCord(
+                          {"a ", "small ", "fragmented ", "cord ", "for ",
+                           "testing ", "chunk ", "iterations."})),
+                      8);
 
   absl::Cord reused_nodes_cord(std::string(40, 'c'));
   reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'b')));
+  MaybeHarden(reused_nodes_cord);
   reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'a')));
   size_t expected_chunks = 3;
   for (int i = 0; i < 8; ++i) {
     reused_nodes_cord.Prepend(reused_nodes_cord);
+    MaybeHarden(reused_nodes_cord);
     expected_chunks *= 2;
     VerifyChunkIterator(reused_nodes_cord, expected_chunks);
   }
 
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
   absl::Cord flat_cord(RandomLowercaseString(&rng, 256));
   absl::Cord subcords;
   for (int i = 0; i < 128; ++i) subcords.Prepend(flat_cord.Subcord(i, 128));
   VerifyChunkIterator(subcords, 128);
 }
 
-TEST(CordCharIterator, Traits) {
+
+TEST_P(CordTest, AdvanceAndReadOnDataEdge) {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
+  const std::string data = RandomLowercaseString(&rng, 2000);
+  for (bool as_flat : {true, false}) {
+    SCOPED_TRACE(as_flat ? "Flat" : "External");
+
+    absl::Cord cord =
+        as_flat ? absl::Cord(data)
+                : absl::MakeCordFromExternal(data, [](absl::string_view) {});
+    auto it = cord.Chars().begin();
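+    // Reading past the end is only bounds-checked (and fatal) in debug or
+    // hardened builds, hence the conditional death test below.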
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+    EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*");
+#endif
+
+    it = cord.Chars().begin();
+    absl::Cord frag = cord.AdvanceAndRead(&it, 2000);
+    EXPECT_EQ(frag, data);
+    EXPECT_TRUE(it == cord.Chars().end());
+
+    it = cord.Chars().begin();
+    frag = cord.AdvanceAndRead(&it, 200);
+    EXPECT_EQ(frag, data.substr(0, 200));
+    EXPECT_FALSE(it == cord.Chars().end());
+
+    frag = cord.AdvanceAndRead(&it, 1500);
+    EXPECT_EQ(frag, data.substr(200, 1500));
+    EXPECT_FALSE(it == cord.Chars().end());
+
+    frag = cord.AdvanceAndRead(&it, 300);
+    EXPECT_EQ(frag, data.substr(1700, 300));
+    EXPECT_TRUE(it == cord.Chars().end());
+  }
+}
+
+TEST_P(CordTest, AdvanceAndReadOnSubstringDataEdge) {
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
+  const std::string data = RandomLowercaseString(&rng, 2500);
+  for (bool as_flat : {true, false}) {
+    SCOPED_TRACE(as_flat ? "Flat" : "External");
+
+    absl::Cord cord =
+        as_flat ? absl::Cord(data)
+                : absl::MakeCordFromExternal(data, [](absl::string_view) {});
+    cord = cord.Subcord(200, 2000);
+    const std::string substr = data.substr(200, 2000);
+
+    auto it = cord.Chars().begin();
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+    EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*");
+#endif
+
+    it = cord.Chars().begin();
+    absl::Cord frag = cord.AdvanceAndRead(&it, 2000);
+    EXPECT_EQ(frag, substr);
+    EXPECT_TRUE(it == cord.Chars().end());
+
+    it = cord.Chars().begin();
+    frag = cord.AdvanceAndRead(&it, 200);
+    EXPECT_EQ(frag, substr.substr(0, 200));
+    EXPECT_FALSE(it == cord.Chars().end());
+
+    frag = cord.AdvanceAndRead(&it, 1500);
+    EXPECT_EQ(frag, substr.substr(200, 1500));
+    EXPECT_FALSE(it == cord.Chars().end());
+
+    frag = cord.AdvanceAndRead(&it, 300);
+    EXPECT_EQ(frag, substr.substr(1700, 300));
+    EXPECT_TRUE(it == cord.Chars().end());
+  }
+}
+
+TEST_P(CordTest, CharIteratorTraits) {
   static_assert(std::is_copy_constructible<absl::Cord::CharIterator>::value,
                 "");
   static_assert(std::is_copy_assignable<absl::Cord::CharIterator>::value, "");
@@ -1516,44 +2428,88 @@
   }
 }
 
-TEST(CordCharIterator, Operations) {
+TEST_P(CordTest, CharIteratorOperations) {
   absl::Cord empty_cord;
   VerifyCharIterator(empty_cord);
 
   absl::Cord small_buffer_cord("small cord");
+  MaybeHarden(small_buffer_cord);
   VerifyCharIterator(small_buffer_cord);
 
   absl::Cord flat_node_cord("larger than small buffer optimization");
+  MaybeHarden(flat_node_cord);
   VerifyCharIterator(flat_node_cord);
 
-  VerifyCharIterator(
+  VerifyCharIterator(MaybeHardened(
       absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ",
-                                "testing ", "character ", "iteration."}));
+                                "testing ", "character ", "iteration."})));
 
   absl::Cord reused_nodes_cord("ghi");
   reused_nodes_cord.Prepend(absl::Cord("def"));
   reused_nodes_cord.Prepend(absl::Cord("abc"));
   for (int i = 0; i < 4; ++i) {
     reused_nodes_cord.Prepend(reused_nodes_cord);
+    MaybeHarden(reused_nodes_cord);
     VerifyCharIterator(reused_nodes_cord);
   }
 
-  RandomEngine rng(testing::GTEST_FLAG(random_seed));
+  RandomEngine rng(GTEST_FLAG_GET(random_seed));
   absl::Cord flat_cord(RandomLowercaseString(&rng, 256));
   absl::Cord subcords;
-  for (int i = 0; i < 4; ++i) subcords.Prepend(flat_cord.Subcord(16 * i, 128));
+  for (int i = 0; i < 4; ++i) {
+    subcords.Prepend(flat_cord.Subcord(16 * i, 128));
+    MaybeHarden(subcords);
+  }
   VerifyCharIterator(subcords);
 }
 
-TEST(Cord, StreamingOutput) {
+TEST_P(CordTest, CharIteratorAdvanceAndRead) {
+  // Create a Cord holding 6 flats of 2500 bytes each, and then iterate over it
+  // reading 150, 1500, 2500 and 3000 bytes. This will result in all possible
+  // partial, full and straddled read combinations including reads below
+  // kMaxBytesToCopy. b/197776822 surfaced a bug for a specific partial, small
+  // read 'at end' on Cord which caused a failure on attempting to read past the
+  // end in CordRepBtreeReader which was not covered by any existing test.
+  constexpr int kBlocks = 6;
+  constexpr size_t kBlockSize = 2500;
+  constexpr size_t kChunkSize1 = 1500;
+  constexpr size_t kChunkSize2 = 2500;
+  constexpr size_t kChunkSize3 = 3000;
+  constexpr size_t kChunkSize4 = 150;
+  RandomEngine rng;
+  std::string data = RandomLowercaseString(&rng, kBlocks * kBlockSize);
+  absl::Cord cord;
+  for (int i = 0; i < kBlocks; ++i) {
+    const std::string block = data.substr(i * kBlockSize, kBlockSize);
+    cord.Append(absl::Cord(block));
+  }
+
+  MaybeHarden(cord);
+
+  for (size_t chunk_size :
+       {kChunkSize1, kChunkSize2, kChunkSize3, kChunkSize4}) {
+    absl::Cord::CharIterator it = cord.char_begin();
+    size_t offset = 0;
+    while (offset < data.length()) {
+      const size_t n = std::min<size_t>(data.length() - offset, chunk_size);
+      absl::Cord chunk = cord.AdvanceAndRead(&it, n);
+      ASSERT_EQ(chunk.size(), n);
+      ASSERT_EQ(chunk.Compare(data.substr(offset, n)), 0);
+      offset += n;
+    }
+  }
+}
+
+TEST_P(CordTest, StreamingOutput) {
   absl::Cord c =
       absl::MakeFragmentedCord({"A ", "small ", "fragmented ", "Cord", "."});
+  MaybeHarden(c);
   std::stringstream output;
   output << c;
   EXPECT_EQ("A small fragmented Cord.", output.str());
 }
 
-TEST(Cord, ForEachChunk) {
+TEST_P(CordTest, ForEachChunk) {
   for (int num_elements : {1, 10, 200}) {
     SCOPED_TRACE(num_elements);
     std::vector<std::string> cord_chunks;
@@ -1561,6 +2517,7 @@
       cord_chunks.push_back(absl::StrCat("[", i, "]"));
     }
     absl::Cord c = absl::MakeFragmentedCord(cord_chunks);
+    MaybeHarden(c);
 
     std::vector<std::string> iterated_chunks;
     absl::CordTestPeer::ForEachChunk(c,
@@ -1571,13 +2528,14 @@
   }
 }
 
-TEST(Cord, SmallBufferAssignFromOwnData) {
+TEST_P(CordTest, SmallBufferAssignFromOwnData) {
   constexpr size_t kMaxInline = 15;
   std::string contents = "small buff cord";
   EXPECT_EQ(contents.size(), kMaxInline);
   for (size_t pos = 0; pos < contents.size(); ++pos) {
     for (size_t count = contents.size() - pos; count > 0; --count) {
       absl::Cord c(contents);
+      MaybeHarden(c);
       absl::string_view flat = c.Flatten();
       c = flat.substr(pos, count);
       EXPECT_EQ(c, contents.substr(pos, count))
@@ -1586,16 +2544,20 @@
   }
 }
 
-TEST(Cord, Format) {
+TEST_P(CordTest, Format) {
   absl::Cord c;
   absl::Format(&c, "There were %04d little %s.", 3, "pigs");
   EXPECT_EQ(c, "There were 0003 little pigs.");
+  MaybeHarden(c);
   absl::Format(&c, "And %-3llx bad wolf!", 1);
+  MaybeHarden(c);
   EXPECT_EQ(c, "There were 0003 little pigs.And 1   bad wolf!");
 }
 
-TEST(CordDeathTest, Hardening) {
+TEST_P(CordTest, Hardening) {
   absl::Cord cord("hello");
+  MaybeHarden(cord);
+
+  // These statements should abort the program in all build modes.
   EXPECT_DEATH_IF_SUPPORTED(cord.RemovePrefix(6), "");
   EXPECT_DEATH_IF_SUPPORTED(cord.RemoveSuffix(6), "");
@@ -1613,3 +2575,631 @@
   EXPECT_DEATH_IF_SUPPORTED(static_cast<void>(cord.chunk_end()->empty()), "");
   EXPECT_DEATH_IF_SUPPORTED(++cord.chunk_end(), "");
 }
+
+// This test mimics a specific (and rare) application repeatedly splitting a
+// cord, inserting (overwriting) a string value, and composing a new cord from
+// the three pieces. This is hostile towards a Btree implementation: A split of
+// a node at any level is likely to have the right-most edge of the left split,
+// and the left-most edge of the right split shared. For example, splitting a
+// leaf node with 6 edges will result likely in a 1-6, 2-5, 3-4, etc. split,
+// sharing the 'split node'. When recomposing such nodes, we 'injected' an edge
+// in that node. As this happens with some probability on each level of the
+// tree, this will quickly grow the tree until it reaches maximum height.
+TEST_P(CordTest, BtreeHostileSplitInsertJoin) {
+  absl::BitGen bitgen;
+
+  // Build up a cord of roughly 1GB of data: a 1 KiB buffer appended one
+  // million times.
+  std::string data(1 << 10, 'x');
+  absl::Cord buffer(data);
+  absl::Cord cord;
+  for (int i = 0; i < 1000000; ++i) {
+    cord.Append(buffer);
+  }
+
+  for (int j = 0; j < 1000; ++j) {
+    MaybeHarden(cord);
+    size_t offset = absl::Uniform(bitgen, 0u, cord.size());
+    size_t length = absl::Uniform(bitgen, 100u, data.size());
+    if (cord.size() == offset) {
+      cord.Append(absl::string_view(data.data(), length));
+    } else {
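+      // Split the cord into [0, offset) and [offset + length, size), then
+      // recompose it as prefix + fresh data + suffix.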
+      absl::Cord suffix;
+      if (offset + length < cord.size()) {
+        suffix = cord;
+        suffix.RemovePrefix(offset + length);
+      }
+      if (cord.size() > offset) {
+        cord.RemoveSuffix(cord.size() - offset);
+      }
+      cord.Append(absl::string_view(data.data(), length));
+      if (!suffix.empty()) {
+        cord.Append(suffix);
+      }
+    }
+  }
+}
+
+class AfterExitCordTester {
+ public:
+  bool Set(absl::Cord* cord, absl::string_view expected) {
+    cord_ = cord;
+    expected_ = expected;
+    return true;
+  }
+
+  ~AfterExitCordTester() {
+    EXPECT_EQ(*cord_, expected_);
+  }
+ private:
+  absl::Cord* cord_;
+  absl::string_view expected_;
+};
+
+// Deliberately prevents the destructor for an absl::Cord from running. The cord
+// is accessible via the cord member during the lifetime of the CordLeaker.
+// After the CordLeaker is destroyed, pointers to the cord will remain valid
+// until the CordLeaker's memory is deallocated.
+struct CordLeaker {
+  union {
+    absl::Cord cord;
+  };
+
+  template <typename Str>
+  constexpr explicit CordLeaker(const Str& str) : cord(str) {}
+
+  ~CordLeaker() {
+    // Don't do anything, including running cord's destructor. (cord's
+    // destructor won't run automatically because cord is hidden inside a
+    // union.)
+  }
+};
+
+template <typename Str>
+void TestConstinitConstructor(Str) {
+  const auto expected = Str::value;
+  // Defined before `cord` to be destroyed after it.
+  static AfterExitCordTester exit_tester;  // NOLINT
+  ABSL_CONST_INIT static CordLeaker cord_leaker(Str{});  // NOLINT
+  // cord_leaker is static, so this reference will remain valid through the end
+  // of program execution.
+  static absl::Cord& cord = cord_leaker.cord;
+  static bool init_exit_tester = exit_tester.Set(&cord, expected);
+  (void)init_exit_tester;
+
+  EXPECT_EQ(cord, expected);
+  // Copy the object and test the copy, and the original.
+  {
+    absl::Cord copy = cord;
+    EXPECT_EQ(copy, expected);
+  }
+  // The original still works
+  EXPECT_EQ(cord, expected);
+
+  // Try adding more structure to the tree.
+  {
+    absl::Cord copy = cord;
+    std::string expected_copy(expected);
+    for (int i = 0; i < 10; ++i) {
+      copy.Append(cord);
+      absl::StrAppend(&expected_copy, expected);
+      EXPECT_EQ(copy, expected_copy);
+    }
+  }
+
+  // Make sure we are using the right branch during constant evaluation.
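+  // (Strings longer than the 15-byte inline capacity (kMaxInline) cannot be
+  // stored inline, hence the size >= 16 comparison.)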
+  EXPECT_EQ(absl::CordTestPeer::IsTree(cord), cord.size() >= 16);
+
+  for (int i = 0; i < 10; ++i) {
+    // Make a few more Cords from the same global rep.
+    // This tests what happens when the refcount for it gets below 1.
+    EXPECT_EQ(expected, absl::Cord(Str{}));
+  }
+}
+
+constexpr int SimpleStrlen(const char* p) {
+  return *p ? 1 + SimpleStrlen(p + 1) : 0;
+}
+
+struct ShortView {
+  constexpr absl::string_view operator()() const {
+    return absl::string_view("SSO string", SimpleStrlen("SSO string"));
+  }
+};
+
+struct LongView {
+  constexpr absl::string_view operator()() const {
+    return absl::string_view("String that does not fit SSO.",
+                             SimpleStrlen("String that does not fit SSO."));
+  }
+};
+
+
+TEST_P(CordTest, ConstinitConstructor) {
+  TestConstinitConstructor(
+      absl::strings_internal::MakeStringConstant(ShortView{}));
+  TestConstinitConstructor(
+      absl::strings_internal::MakeStringConstant(LongView{}));
+}
+
+namespace {
+
+// Test helper that generates a populated cord for future manipulation.
+//
+// By test convention, all generated cords begin with the characters "abcde" at
+// the start of the first chunk.
+class PopulatedCordFactory {
+ public:
+  constexpr PopulatedCordFactory(absl::string_view name,
+                                 absl::Cord (*generator)())
+      : name_(name), generator_(generator) {}
+
+  absl::string_view Name() const { return name_; }
+  absl::Cord Generate() const { return generator_(); }
+
+ private:
+  absl::string_view name_;
+  absl::Cord (*generator_)();
+};
+
+// clang-format off
+// This array is constant-initialized in conformant compilers.
+PopulatedCordFactory cord_factories[] = {
+  {"sso", [] { return absl::Cord("abcde"); }},
+  {"flat", [] {
+    // Too large to live in SSO space, but small enough to be a simple FLAT.
+    absl::Cord flat(absl::StrCat("abcde", std::string(1000, 'x')));
+    flat.Flatten();
+    return flat;
+  }},
+  {"external", [] {
+    // A cheat: we are using a string literal as the external storage, so a
+    // no-op releaser is correct here.
+    return absl::MakeCordFromExternal("abcde External!", []{});
+  }},
+  {"external substring", [] {
+    // A cheat: we are using a string literal as the external storage, so a
+    // no-op releaser is correct here.
+    absl::Cord ext = absl::MakeCordFromExternal("-abcde External!", []{});
+    return absl::CordTestPeer::MakeSubstring(ext, 1, ext.size() - 1);
+  }},
+  {"substring", [] {
+    absl::Cord flat(absl::StrCat("-abcde", std::string(1000, 'x')));
+    flat.Flatten();
+    return flat.Subcord(1, 998);
+  }},
+  {"fragmented", [] {
+    std::string fragment = absl::StrCat("abcde", std::string(195, 'x'));
+    std::vector<std::string> fragments(200, fragment);
+    absl::Cord cord = absl::MakeFragmentedCord(fragments);
+    assert(cord.size() == 40000);
+    return cord;
+  }},
+};
+// clang-format on
+
+// Test helper that can mutate a cord, and possibly undo the mutation, for
+// testing.
+class CordMutator {
+ public:
+  constexpr CordMutator(absl::string_view name, void (*mutate)(absl::Cord&),
+                        void (*undo)(absl::Cord&) = nullptr)
+      : name_(name), mutate_(mutate), undo_(undo) {}
+
+  absl::string_view Name() const { return name_; }
+  void Mutate(absl::Cord& cord) const { mutate_(cord); }
+  bool CanUndo() const { return undo_ != nullptr; }
+  void Undo(absl::Cord& cord) const { undo_(cord); }
+
+ private:
+  absl::string_view name_;
+  void (*mutate_)(absl::Cord&);
+  void (*undo_)(absl::Cord&);
+};
+
+// clang-format off
+// This array is constant-initialized in conformant compilers.
+CordMutator cord_mutators[] = {
+  {"clear", [](absl::Cord& c) { c.Clear(); }},
+  {"overwrite", [](absl::Cord& c) { c = "overwritten"; }},
+  {
+    "append string",
+    [](absl::Cord& c) { c.Append("0123456789"); },
+    [](absl::Cord& c) { c.RemoveSuffix(10); }
+  },
+  {
+    "append cord",
+    [](absl::Cord& c) {
+      c.Append(absl::MakeFragmentedCord({"12345", "67890"}));
+    },
+    [](absl::Cord& c) { c.RemoveSuffix(10); }
+  },
+  {
+    "append checksummed cord",
+    [](absl::Cord& c) {
+      absl::Cord to_append = absl::MakeFragmentedCord({"12345", "67890"});
+      to_append.SetExpectedChecksum(999);
+      c.Append(to_append);
+    },
+    [](absl::Cord& c) { c.RemoveSuffix(10); }
+  },
+  {
+    "append self",
+    [](absl::Cord& c) { c.Append(c); },
+    [](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); }
+  },
+  {
+    "append empty string",
+    [](absl::Cord& c) { c.Append(""); },
+    [](absl::Cord& c) { }
+  },
+  {
+    "append empty cord",
+    [](absl::Cord& c) { c.Append(absl::Cord()); },
+    [](absl::Cord& c) { }
+  },
+  {
+    "append empty checksummed cord",
+    [](absl::Cord& c) {
+      absl::Cord to_append;
+      to_append.SetExpectedChecksum(999);
+      c.Append(to_append);
+    },
+    [](absl::Cord& c) { }
+  },
+  {
+    "prepend string",
+    [](absl::Cord& c) { c.Prepend("9876543210"); },
+    [](absl::Cord& c) { c.RemovePrefix(10); }
+  },
+  {
+    "prepend cord",
+    [](absl::Cord& c) {
+      c.Prepend(absl::MakeFragmentedCord({"98765", "43210"}));
+    },
+    [](absl::Cord& c) { c.RemovePrefix(10); }
+  },
+  {
+    "prepend checksummed cord",
+    [](absl::Cord& c) {
+      absl::Cord to_prepend = absl::MakeFragmentedCord({"98765", "43210"});
+      to_prepend.SetExpectedChecksum(999);
+      c.Prepend(to_prepend);
+    },
+    [](absl::Cord& c) { c.RemovePrefix(10); }
+  },
+  {
+    "prepend empty string",
+    [](absl::Cord& c) { c.Prepend(""); },
+    [](absl::Cord& c) { }
+  },
+  {
+    "prepend empty cord",
+    [](absl::Cord& c) { c.Prepend(absl::Cord()); },
+    [](absl::Cord& c) { }
+  },
+  {
+    "prepend empty checksummed cord",
+    [](absl::Cord& c) {
+      absl::Cord to_prepend;
+      to_prepend.SetExpectedChecksum(999);
+      c.Prepend(to_prepend);
+    },
+    [](absl::Cord& c) { }
+  },
+  {
+    "prepend self",
+    [](absl::Cord& c) { c.Prepend(c); },
+    [](absl::Cord& c) { c.RemovePrefix(c.size() / 2); }
+  },
+  {"remove prefix", [](absl::Cord& c) { c.RemovePrefix(c.size() / 2); }},
+  {"remove suffix", [](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); }},
+  {"remove 0-prefix", [](absl::Cord& c) { c.RemovePrefix(0); }},
+  {"remove 0-suffix", [](absl::Cord& c) { c.RemoveSuffix(0); }},
+  {"subcord", [](absl::Cord& c) { c = c.Subcord(1, c.size() - 2); }},
+  {
+    "swap inline",
+    [](absl::Cord& c) {
+      absl::Cord other("swap");
+      c.swap(other);
+    }
+  },
+  {
+    "swap tree",
+    [](absl::Cord& c) {
+      absl::Cord other(std::string(10000, 'x'));
+      c.swap(other);
+    }
+  },
+};
+// clang-format on
+}  // namespace
+
+TEST_P(CordTest, ExpectedChecksum) {
+  for (const PopulatedCordFactory& factory : cord_factories) {
+    SCOPED_TRACE(factory.Name());
+    for (bool shared : {false, true}) {
+      SCOPED_TRACE(shared);
+
+      absl::Cord shared_cord_source = factory.Generate();
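+      // When `shared` is true, make_instance() returns copies sharing
+      // shared_cord_source's underlying rep; otherwise each call generates a
+      // fresh, unshared cord.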
+      auto make_instance = [=] {
+        return shared ? shared_cord_source : factory.Generate();
+      };
+
+      const absl::Cord base_value = factory.Generate();
+      const std::string base_value_as_string(factory.Generate().Flatten());
+
+      absl::Cord c1 = make_instance();
+      EXPECT_FALSE(c1.ExpectedChecksum().has_value());
+
+      // Setting an expected checksum works, and retains the cord's bytes
+      c1.SetExpectedChecksum(12345);
+      EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+      EXPECT_EQ(c1, base_value);
+
+      // Test that setting an expected checksum again doesn't crash or leak
+      // memory.
+      c1.SetExpectedChecksum(12345);
+      EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+      EXPECT_EQ(c1, base_value);
+
+      // CRC persists through copies, assignments, and moves:
+      absl::Cord c1_copy_construct = c1;
+      EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345);
+
+      absl::Cord c1_copy_assign;
+      c1_copy_assign = c1;
+      EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345);
+
+      absl::Cord c1_move(std::move(c1_copy_assign));
+      EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345);
+
+      EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+
+      // A CRC Cord compares equal to its non-CRC value.
+      EXPECT_EQ(c1, make_instance());
+
+      for (const CordMutator& mutator : cord_mutators) {
+        SCOPED_TRACE(mutator.Name());
+
+        // Test that mutating a cord removes its stored checksum
+        absl::Cord c2 = make_instance();
+        c2.SetExpectedChecksum(24680);
+
+        mutator.Mutate(c2);
+
+        if (c1 == c2) {
+          // Not a mutation (for example, appending the empty string).
+          // Whether the checksum is removed is not defined.
+          continue;
+        }
+
+        EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
+
+        if (mutator.CanUndo()) {
+          // Undoing an operation should not restore the checksum
+          mutator.Undo(c2);
+          EXPECT_EQ(c2, base_value);
+          EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
+        }
+      }
+
+      absl::Cord c3 = make_instance();
+      c3.SetExpectedChecksum(999);
+      const absl::Cord& cc3 = c3;
+
+      // Test that all cord reading operations function in the face of an
+      // expected checksum.
+
+      // Test data precondition
+      ASSERT_TRUE(cc3.StartsWith("abcde"));
+
+      EXPECT_EQ(cc3.size(), base_value_as_string.size());
+      EXPECT_FALSE(cc3.empty());
+      EXPECT_EQ(cc3.Compare(base_value), 0);
+      EXPECT_EQ(cc3.Compare(base_value_as_string), 0);
+      EXPECT_EQ(cc3.Compare("wxyz"), -1);
+      EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1);
+      EXPECT_EQ(cc3.Compare("aaaa"), 1);
+      EXPECT_EQ(cc3.Compare(absl::Cord("aaaa")), 1);
+      EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1);
+      EXPECT_EQ(absl::Cord("aaaa").Compare(cc3), -1);
+      EXPECT_TRUE(cc3.StartsWith("abcd"));
+      EXPECT_EQ(std::string(cc3), base_value_as_string);
+
+      std::string dest;
+      absl::CopyCordToString(cc3, &dest);
+      EXPECT_EQ(dest, base_value_as_string);
+
+      bool first_pass = true;
+      for (absl::string_view chunk : cc3.Chunks()) {
+        if (first_pass) {
+          EXPECT_TRUE(absl::StartsWith(chunk, "abcde"));
+        }
+        first_pass = false;
+      }
+      first_pass = true;
+      for (char ch : cc3.Chars()) {
+        if (first_pass) {
+          EXPECT_EQ(ch, 'a');
+        }
+        first_pass = false;
+      }
+      EXPECT_TRUE(absl::StartsWith(*cc3.chunk_begin(), "abcde"));
+      EXPECT_EQ(*cc3.char_begin(), 'a');
+
+      auto char_it = cc3.char_begin();
+      absl::Cord::Advance(&char_it, 2);
+      EXPECT_EQ(absl::Cord::AdvanceAndRead(&char_it, 2), "cd");
+      EXPECT_EQ(*char_it, 'e');
+      char_it = cc3.char_begin();
+      absl::Cord::Advance(&char_it, 2);
+      EXPECT_TRUE(absl::StartsWith(absl::Cord::ChunkRemaining(char_it), "cde"));
+
+      EXPECT_EQ(cc3[0], 'a');
+      EXPECT_EQ(cc3[4], 'e');
+      EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value));
+      EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value_as_string));
+    }
+  }
+}
+
+// Test the special cases encountered with an empty checksummed cord.
+TEST_P(CordTest, ChecksummedEmptyCord) {
+  absl::Cord c1;
+  EXPECT_FALSE(c1.ExpectedChecksum().has_value());
+
+  // Setting an expected checksum works.
+  c1.SetExpectedChecksum(12345);
+  EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+  EXPECT_EQ(c1, "");
+  EXPECT_TRUE(c1.empty());
+
+  // Test that setting an expected checksum again doesn't crash or leak memory.
+  c1.SetExpectedChecksum(12345);
+  EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+  EXPECT_EQ(c1, "");
+  EXPECT_TRUE(c1.empty());
+
+  // CRC persists through copies, assignments, and moves:
+  absl::Cord c1_copy_construct = c1;
+  EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345);
+
+  absl::Cord c1_copy_assign;
+  c1_copy_assign = c1;
+  EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345);
+
+  absl::Cord c1_move(std::move(c1_copy_assign));
+  EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345);
+
+  EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+
+  // A CRC Cord compares equal to its non-CRC value.
+  EXPECT_EQ(c1, absl::Cord());
+
+  for (const CordMutator& mutator : cord_mutators) {
+    SCOPED_TRACE(mutator.Name());
+
+    // Mutate an empty checksummed cord to catch crashes and to exercise
+    // memory sanitizers.
+    absl::Cord c2;
+    c2.SetExpectedChecksum(24680);
+    mutator.Mutate(c2);
+
+    if (c2.empty()) {
+      // Not a mutation
+      continue;
+    }
+    EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
+
+    if (mutator.CanUndo()) {
+      mutator.Undo(c2);
+    }
+  }
+
+  absl::Cord c3;
+  c3.SetExpectedChecksum(999);
+  const absl::Cord& cc3 = c3;
+
+  // Test that all cord reading operations function in the face of an
+  // expected checksum.
+  EXPECT_TRUE(cc3.StartsWith(""));
+  EXPECT_TRUE(cc3.EndsWith(""));
+  EXPECT_TRUE(cc3.empty());
+  EXPECT_EQ(cc3, "");
+  EXPECT_EQ(cc3, absl::Cord());
+  EXPECT_EQ(cc3.size(), 0);
+  EXPECT_EQ(cc3.Compare(absl::Cord()), 0);
+  EXPECT_EQ(cc3.Compare(c1), 0);
+  EXPECT_EQ(cc3.Compare(cc3), 0);
+  EXPECT_EQ(cc3.Compare(""), 0);
+  EXPECT_EQ(cc3.Compare("wxyz"), -1);
+  EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1);
+  EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1);
+  EXPECT_EQ(std::string(cc3), "");
+
+  std::string dest;
+  absl::CopyCordToString(cc3, &dest);
+  EXPECT_EQ(dest, "");
+
+  for (absl::string_view chunk : cc3.Chunks()) {  // NOLINT(unreachable loop)
+    static_cast<void>(chunk);
+    GTEST_FAIL() << "no chunks expected";
+  }
+  EXPECT_TRUE(cc3.chunk_begin() == cc3.chunk_end());
+
+  for (char ch : cc3.Chars()) {  // NOLINT(unreachable loop)
+    static_cast<void>(ch);
+    GTEST_FAIL() << "no chars expected";
+  }
+  EXPECT_TRUE(cc3.char_begin() == cc3.char_end());
+
+  EXPECT_EQ(cc3.TryFlat(), "");
+  EXPECT_EQ(absl::HashOf(c3), absl::HashOf(absl::Cord()));
+  EXPECT_EQ(absl::HashOf(c3), absl::HashOf(absl::string_view()));
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && defined(ABSL_INTERNAL_CORD_HAVE_SANITIZER)
+
+// Returns an expected poison / uninitialized death message expression.
+const char* MASanDeathExpr() {
+  return "(use-after-poison|use-of-uninitialized-value)";
+}
+
+TEST(CordSanitizerTest, SanitizesEmptyCord) {
+  absl::Cord cord;
+  const char* data = cord.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[0], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesSmallCord) {
+  absl::Cord cord("Hello");
+  const char* data = cord.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesCordOnSetSSOValue) {
+  absl::Cord cord("String that is too big to be an SSO value");
+  cord = "Hello";
+  const char* data = cord.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesCordOnCopyCtor) {
+  absl::Cord src("hello");
+  absl::Cord dst(src);
+  const char* data = dst.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesCordOnMoveCtor) {
+  absl::Cord src("hello");
+  absl::Cord dst(std::move(src));
+  const char* data = dst.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesCordOnAssign) {
+  absl::Cord src("hello");
+  absl::Cord dst;
+  dst = src;
+  const char* data = dst.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesCordOnMoveAssign) {
+  absl::Cord src("hello");
+  absl::Cord dst;
+  dst = std::move(src);
+  const char* data = dst.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+TEST(CordSanitizerTest, SanitizesCordOnSsoAssign) {
+  absl::Cord src("hello");
+  absl::Cord dst("String that is too big to be an SSO value");
+  dst = src;
+  const char* data = dst.Flatten().data();
+  EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
+}
+
+#endif  // GTEST_HAS_DEATH_TEST && ABSL_INTERNAL_CORD_HAVE_SANITIZER
diff --git a/abseil-cpp/absl/strings/cord_test_helpers.h b/abseil-cpp/absl/strings/cord_test_helpers.h
index f1036e3..ca52240 100644
--- a/abseil-cpp/absl/strings/cord_test_helpers.h
+++ b/abseil-cpp/absl/strings/cord_test_helpers.h
@@ -17,11 +17,73 @@
 #ifndef ABSL_STRINGS_CORD_TEST_HELPERS_H_
 #define ABSL_STRINGS_CORD_TEST_HELPERS_H_
 
+#include <cstdint>
+#include <iostream>
+#include <string>
+
+#include "absl/base/config.h"
 #include "absl/strings/cord.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/string_view.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
+// Cord sizes relevant for testing
+enum class TestCordSize {
+  // An empty value
+  kEmpty = 0,
+
+  // An inlined string value
+  kInlined = cord_internal::kMaxInline / 2 + 1,
+
+  // 'Well known' SSO lengths (excluding terminating zero).
+  // libstdcxx has a maximum SSO of 15, libc++ has a maximum SSO of 22.
+  kStringSso1 = 15,
+  kStringSso2 = 22,
+
+  // A string value which is too large to fit in inlined data, but small enough
+  // such that Cord prefers copying the value if possible, i.e.: not stealing
+  // std::string inputs, or referencing existing CordReps on Append, etc.
+  kSmall = cord_internal::kMaxBytesToCopy / 2 + 1,
+
+  // A string value large enough that Cord prefers to reference or steal from
+  // existing inputs rather than copying contents of the input.
+  kMedium = cord_internal::kMaxFlatLength / 2 + 1,
+
+  // A string value large enough to cause it to be stored in multiple flats.
+  kLarge = cord_internal::kMaxFlatLength * 4
+};
+
+// Returns a human-readable name for the given TestCordSize.
+inline absl::string_view ToString(TestCordSize size) {
+  switch (size) {
+    case TestCordSize::kEmpty:
+      return "Empty";
+    case TestCordSize::kInlined:
+      return "Inlined";
+    case TestCordSize::kSmall:
+      return "Small";
+    case TestCordSize::kStringSso1:
+      return "StringSso1";
+    case TestCordSize::kStringSso2:
+      return "StringSso2";
+    case TestCordSize::kMedium:
+      return "Medium";
+    case TestCordSize::kLarge:
+      return "Large";
+  }
+  return "???";
+}
+
+// Returns the length matching the specified size
+inline size_t Length(TestCordSize size) { return static_cast<size_t>(size); }
+
+// Stream output helper
+inline std::ostream& operator<<(std::ostream& stream, TestCordSize size) {
+  return stream << ToString(size);
+}
+
 // Creates a multi-segment Cord from an iterable container of strings.  The
 // resulting Cord is guaranteed to have one segment for every string in the
 // container.  This allows code to be unit tested with multi-segment Cord
diff --git a/abseil-cpp/absl/strings/cordz_test.cc b/abseil-cpp/absl/strings/cordz_test.cc
new file mode 100644
index 0000000..2b7d30b
--- /dev/null
+++ b/abseil-cpp/absl/strings/cordz_test.cc
@@ -0,0 +1,466 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/cord_test_helpers.h"
+#include "absl/strings/cordz_test_helpers.h"
+#include "absl/strings/internal/cordz_functions.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/strings/internal/cordz_sample_token.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+
+using testing::Eq;
+using testing::AnyOf;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+using cord_internal::CordzInfo;
+using cord_internal::CordzSampleToken;
+using cord_internal::CordzStatistics;
+using cord_internal::CordzUpdateTracker;
+using Method = CordzUpdateTracker::MethodIdentifier;
+
+// Do not print cord contents; we only care about 'size', if anything.
+// Note that this method must be inside the named namespace.
+inline void PrintTo(const Cord& cord, std::ostream* s) {
+  if (s) *s << "Cord[" << cord.size() << "]";
+}
+
+namespace {
+
+auto constexpr kMaxInline = cord_internal::kMaxInline;
+
+// Returns a string_view value of the specified length
+// We do this to avoid 'consuming' large strings in Cord by default.
+absl::string_view MakeString(size_t size) {
+  thread_local std::string str;
+  str = std::string(size, '.');
+  return str;
+}
+
+absl::string_view MakeString(TestCordSize size) {
+  return MakeString(Length(size));
+}
+
+// Returns a cord with a sampled method of kAppendString.
+absl::Cord MakeAppendStringCord(TestCordSize size) {
+  CordzSamplingIntervalHelper always(1);
+  absl::Cord cord;
+  cord.Append(MakeString(size));
+  return cord;
+}
+
+std::string TestParamToString(::testing::TestParamInfo<TestCordSize> size) {
+  return absl::StrCat("On", ToString(size.param), "Cord");
+}
+
+class CordzUpdateTest : public testing::TestWithParam<TestCordSize> {
+ public:
+  Cord& cord() { return cord_; }
+
+  Method InitialOr(Method method) const {
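+  // Returns kConstructorString if the cord under test was large enough to be
+  // sampled at construction, since that is what the constructor records;
+  // otherwise returns `method`.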
+    return (GetParam() > TestCordSize::kInlined) ? Method::kConstructorString
+                                                 : method;
+  }
+
+ private:
+  CordzSamplingIntervalHelper sample_every_{1};
+  Cord cord_{MakeString(GetParam())};
+};
+
+template <typename T>
+std::string ParamToString(::testing::TestParamInfo<T> param) {
+  return std::string(ToString(param.param));
+}
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordzUpdateTest,
+                         testing::Values(TestCordSize::kEmpty,
+                                         TestCordSize::kInlined,
+                                         TestCordSize::kLarge),
+                         TestParamToString);
+
+class CordzStringTest : public testing::TestWithParam<TestCordSize> {
+ private:
+  CordzSamplingIntervalHelper sample_every_{1};
+};
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordzStringTest,
+                         testing::Values(TestCordSize::kInlined,
+                                         TestCordSize::kStringSso1,
+                                         TestCordSize::kStringSso2,
+                                         TestCordSize::kSmall,
+                                         TestCordSize::kLarge),
+                         ParamToString<TestCordSize>);
+
+TEST(CordzTest, ConstructSmallArray) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord(MakeString(TestCordSize::kSmall));
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+}
+
+TEST(CordzTest, ConstructLargeArray) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord(MakeString(TestCordSize::kLarge));
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+}
+
+TEST_P(CordzStringTest, ConstructString) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord(std::string(Length(GetParam()), '.'));
+  if (Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  }
+}
+
+TEST(CordzTest, CopyConstructFromUnsampled) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  Cord cord(src);
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+}
+
+TEST(CordzTest, CopyConstructFromSampled) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord(src);
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+}
+
+TEST(CordzTest, MoveConstruct) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord src(MakeString(TestCordSize::kLarge));
+  Cord cord(std::move(src));
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+}
+
+TEST_P(CordzUpdateTest, AssignUnsampledCord) {
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  const CordzInfo* info = GetCordzInfoForTesting(cord());
+  cord() = src;
+  EXPECT_THAT(GetCordzInfoForTesting(cord()), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
+}
+
+TEST_P(CordzUpdateTest, AssignSampledCord) {
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  cord() = src;
+  ASSERT_THAT(cord(), HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord())->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzUpdateTest, AssignSampledCordToInlined) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord cord;
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  cord = src;
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzUpdateTest, AssignSampledCordToUnsampledCord) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord cord = UnsampledCord(MakeString(TestCordSize::kLarge));
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  cord = src;
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzUpdateTest, AssignUnsampledCordToSampledCordWithoutSampling) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord cord = MakeAppendStringCord(TestCordSize::kLarge);
+  const CordzInfo* info = GetCordzInfoForTesting(cord);
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
+}
+
+TEST(CordzUpdateTest, AssignUnsampledCordToSampledCordWithSampling) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord = MakeAppendStringCord(TestCordSize::kLarge);
+  const CordzInfo* info = GetCordzInfoForTesting(cord);
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
+}
+
+TEST(CordzUpdateTest, AssignSampledCordToSampledCord) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzUpdateTest, AssignUnsampledCordToSampledCord) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzTest, AssignInlinedCordToSampledCord) {
+  CordzSampleToken token;
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord(MakeString(TestCordSize::kLarge));
+  const CordzInfo* info = GetCordzInfoForTesting(cord);
+  Cord src = UnsampledCord(MakeString(TestCordSize::kInlined));
+  cord = src;
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
+}
+
+TEST(CordzUpdateTest, MoveAssignCord) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord;
+  Cord src(MakeString(TestCordSize::kLarge));
+  cord = std::move(src);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+}
+
+TEST_P(CordzUpdateTest, AssignLargeArray) {
+  cord() = MakeString(TestCordSize::kSmall);
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(Method::kAssignString));
+}
+
+TEST_P(CordzUpdateTest, AssignSmallArray) {
+  cord() = MakeString(TestCordSize::kSmall);
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(Method::kAssignString));
+}
+
+TEST_P(CordzUpdateTest, AssignInlinedArray) {
+  cord() = MakeString(TestCordSize::kInlined);
+  EXPECT_THAT(GetCordzInfoForTesting(cord()), Eq(nullptr));
+}
+
+TEST_P(CordzStringTest, AssignStringToInlined) {
+  Cord cord;
+  cord = std::string(Length(GetParam()), '.');
+  if (Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kAssignString));
+  }
+}
+
+TEST_P(CordzStringTest, AssignStringToCord) {
+  Cord cord(MakeString(TestCordSize::kLarge));
+  cord = std::string(Length(GetParam()), '.');
+  if (Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+    EXPECT_THAT(cord, CordzMethodCountEq(Method::kAssignString, 1));
+  }
+}
+
+TEST_P(CordzUpdateTest, AssignInlinedString) {
+  cord() = std::string(Length(TestCordSize::kInlined), '.');
+  EXPECT_THAT(GetCordzInfoForTesting(cord()), Eq(nullptr));
+}
+
+TEST_P(CordzUpdateTest, AppendCord) {
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  cord().Append(src);
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kAppendCord)));
+}
+
+TEST_P(CordzUpdateTest, MoveAppendCord) {
+  cord().Append(UnsampledCord(MakeString(TestCordSize::kLarge)));
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kAppendCord)));
+}
+
+TEST_P(CordzUpdateTest, AppendSmallArray) {
+  cord().Append(MakeString(TestCordSize::kSmall));
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kAppendString)));
+}
+
+TEST_P(CordzUpdateTest, AppendLargeArray) {
+  cord().Append(MakeString(TestCordSize::kLarge));
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kAppendString)));
+}
+
+TEST_P(CordzStringTest, AppendStringToEmpty) {
+  Cord cord;
+  cord.Append(std::string(Length(GetParam()), '.'));
+  if (Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kAppendString));
+  }
+}
+
+TEST_P(CordzStringTest, AppendStringToInlined) {
+  Cord cord(MakeString(TestCordSize::kInlined));
+  cord.Append(std::string(Length(GetParam()), '.'));
+  if (Length(TestCordSize::kInlined) + Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kAppendString));
+  }
+}
+
+TEST_P(CordzStringTest, AppendStringToCord) {
+  Cord cord(MakeString(TestCordSize::kLarge));
+  cord.Append(std::string(Length(GetParam()), '.'));
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  EXPECT_THAT(cord, CordzMethodCountEq(Method::kAppendString, 1));
+}
+
+TEST(CordzTest, MakeCordFromExternal) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord = MakeCordFromExternal("Hello world", [](absl::string_view) {});
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kMakeCordFromExternal));
+}
+
+TEST(CordzTest, MakeCordFromEmptyExternal) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord = MakeCordFromExternal({}, [](absl::string_view) {});
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+}
+
+TEST_P(CordzUpdateTest, PrependCord) {
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  cord().Prepend(src);
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kPrependCord)));
+}
+
+TEST_P(CordzUpdateTest, PrependSmallArray) {
+  cord().Prepend(MakeString(TestCordSize::kSmall));
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kPrependString)));
+}
+
+TEST_P(CordzUpdateTest, PrependLargeArray) {
+  cord().Prepend(MakeString(TestCordSize::kLarge));
+  EXPECT_THAT(cord(), HasValidCordzInfoOf(InitialOr(Method::kPrependString)));
+}
+
+TEST_P(CordzStringTest, PrependStringToEmpty) {
+  Cord cord;
+  cord.Prepend(std::string(Length(GetParam()), '.'));
+  if (Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kPrependString));
+  }
+}
+
+TEST_P(CordzStringTest, PrependStringToInlined) {
+  Cord cord(MakeString(TestCordSize::kInlined));
+  cord.Prepend(std::string(Length(GetParam()), '.'));
+  if (Length(TestCordSize::kInlined) + Length(GetParam()) > kMaxInline) {
+    EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kPrependString));
+  }
+}
+
+TEST_P(CordzStringTest, PrependStringToCord) {
+  Cord cord(MakeString(TestCordSize::kLarge));
+  cord.Prepend(std::string(Length(GetParam()), '.'));
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  EXPECT_THAT(cord, CordzMethodCountEq(Method::kPrependString, 1));
+}
+
+TEST(CordzTest, RemovePrefix) {
+  CordzSamplingIntervalHelper sample_every(1);
+  Cord cord(MakeString(TestCordSize::kLarge));
+
+  // Half the cord
+  cord.RemovePrefix(cord.size() / 2);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  EXPECT_THAT(cord, CordzMethodCountEq(Method::kRemovePrefix, 1));
+
+  // TODO(mvels): RemovePrefix does not reset to inlined, except if empty?
+  cord.RemovePrefix(cord.size() - kMaxInline);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  EXPECT_THAT(cord, CordzMethodCountEq(Method::kRemovePrefix, 2));
+
+  cord.RemovePrefix(cord.size());
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+}
+
+TEST(CordzTest, RemoveSuffix) {
+  CordzSamplingIntervalHelper sample_every(1);
+  Cord cord(MakeString(TestCordSize::kLarge));
+
+  // Half the cord
+  cord.RemoveSuffix(cord.size() / 2);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  EXPECT_THAT(cord, CordzMethodCountEq(Method::kRemoveSuffix, 1));
+
+  // TODO(mvels): RemoveSuffix does not reset to inlined, except if empty?
+  cord.RemoveSuffix(cord.size() - kMaxInline);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
+  EXPECT_THAT(cord, CordzMethodCountEq(Method::kRemoveSuffix, 2));
+
+  cord.RemoveSuffix(cord.size());
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+}
+
+TEST(CordzTest, SubCordFromUnsampledCord) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  Cord cord = src.Subcord(10, src.size() / 2);
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+}
+
+TEST(CordzTest, SubCordFromSampledCord) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord = src.Subcord(10, src.size() / 2);
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+}
+
+TEST(CordzTest, SmallSubCord) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord = src.Subcord(10, kMaxInline + 1);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
+}
+
+}  // namespace
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_CORDZ_ENABLED
diff --git a/abseil-cpp/absl/strings/cordz_test_helpers.h b/abseil-cpp/absl/strings/cordz_test_helpers.h
new file mode 100644
index 0000000..e410eec
--- /dev/null
+++ b/abseil-cpp/absl/strings/cordz_test_helpers.h
@@ -0,0 +1,151 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
+#define ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
+
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/strings/internal/cordz_sample_token.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Returns the CordzInfo for the cord, or nullptr if the cord is not sampled.
+inline const cord_internal::CordzInfo* GetCordzInfoForTesting(
+    const Cord& cord) {
+  if (!cord.contents_.is_tree()) return nullptr;
+  return cord.contents_.cordz_info();
+}
+
+// Returns true if the provided cordz_info is in the list of sampled cords.
+inline bool CordzInfoIsListed(const cord_internal::CordzInfo* cordz_info,
+                              cord_internal::CordzSampleToken token = {}) {
+  for (const cord_internal::CordzInfo& info : token) {
+    if (cordz_info == &info) return true;
+  }
+  return false;
+}
+
+// Matcher on Cord that verifies all of:
+// - the cord is sampled
+// - the CordzInfo of the cord is listed / discoverable.
+// - the reported CordzStatistics match the cord's actual properties
+// - the cord has an (initial) UpdateTracker count of 1 for `method`
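+//
+// Example use: EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kAppendString));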
+MATCHER_P(HasValidCordzInfoOf, method, "CordzInfo matches cord") {
+  const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg);
+  if (cord_info == nullptr) {
+    *result_listener << "cord is not sampled";
+    return false;
+  }
+  if (!CordzInfoIsListed(cord_info)) {
+    *result_listener << "cord is sampled, but not listed";
+    return false;
+  }
+  cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics();
+  if (stat.size != arg.size()) {
+    *result_listener << "cordz size " << stat.size
+                     << " does not match cord size " << arg.size();
+    return false;
+  }
+  if (stat.update_tracker.Value(method) != 1) {
+    *result_listener << "Expected method count 1 for " << method << ", found "
+                     << stat.update_tracker.Value(method);
+    return false;
+  }
+  return true;
+}
+
+// Matcher on Cord that verifies that the cord is sampled and that the CordzInfo
+// update tracker has 'method' with a call count of 'n'
+MATCHER_P2(CordzMethodCountEq, method, n,
+           absl::StrCat("CordzInfo method count equals ", n)) {
+  const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg);
+  if (cord_info == nullptr) {
+    *result_listener << "cord is not sampled";
+    return false;
+  }
+  cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics();
+  if (stat.update_tracker.Value(method) != n) {
+    *result_listener << "Expected method count " << n << " for " << method
+                     << ", found " << stat.update_tracker.Value(method);
+    return false;
+  }
+  return true;
+}
+
+// Cordz will only update with a new rate once the previously scheduled event
+// has fired. When we disable Cordz, a long delay takes place where we won't
+// consider profiling new Cords. CordzSamplingIntervalHelper will burn through
+// that interval and allow for testing that assumes that the average sampling
+// interval is a particular value.
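+//
+// For example, `CordzSamplingIntervalHelper sample_every{1};` causes every
+// Cord created while it is in scope to be sampled.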
+class CordzSamplingIntervalHelper {
+ public:
+  explicit CordzSamplingIntervalHelper(int32_t interval)
+      : orig_mean_interval_(absl::cord_internal::get_cordz_mean_interval()) {
+    absl::cord_internal::set_cordz_mean_interval(interval);
+    absl::cord_internal::cordz_set_next_sample_for_testing(interval);
+  }
+
+  ~CordzSamplingIntervalHelper() {
+    absl::cord_internal::set_cordz_mean_interval(orig_mean_interval_);
+    absl::cord_internal::cordz_set_next_sample_for_testing(orig_mean_interval_);
+  }
+
+ private:
+  int32_t orig_mean_interval_;
+};
+
+// Wrapper struct managing a small CordRep `rep`
+struct TestCordRep {
+  cord_internal::CordRepFlat* rep;
+
+  TestCordRep() {
+    rep = cord_internal::CordRepFlat::New(100);
+    rep->length = 100;
+    memset(rep->Data(), 1, 100);
+  }
+  ~TestCordRep() { cord_internal::CordRep::Unref(rep); }
+};
+
+// Wrapper struct managing a small CordRep `rep`, and
+// an InlineData `data` initialized with that CordRep.
+struct TestCordData {
+  TestCordRep rep;
+  cord_internal::InlineData data{rep.rep};
+};
+
+// Creates a Cord that is not sampled
+template <typename... Args>
+Cord UnsampledCord(Args... args) {
+  CordzSamplingIntervalHelper never(9999);
+  Cord cord(std::forward<Args>(args)...);
+  ABSL_ASSERT(GetCordzInfoForTesting(cord) == nullptr);
+  return cord;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
diff --git a/abseil-cpp/absl/strings/escaping.cc b/abseil-cpp/absl/strings/escaping.cc
index 9fceeef..2827fba 100644
--- a/abseil-cpp/absl/strings/escaping.cc
+++ b/abseil-cpp/absl/strings/escaping.cc
@@ -42,11 +42,11 @@
 
 inline bool is_octal_digit(char c) { return ('0' <= c) && (c <= '7'); }
 
-inline int hex_digit_to_int(char c) {
+inline unsigned int hex_digit_to_int(char c) {
   static_assert('0' == 0x30 && 'A' == 0x41 && 'a' == 0x61,
                 "Character set must be ASCII.");
-  assert(absl::ascii_isxdigit(c));
-  int x = static_cast<unsigned char>(c);
+  assert(absl::ascii_isxdigit(static_cast<unsigned char>(c)));
+  unsigned int x = static_cast<unsigned char>(c);
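+  // For 'A'-'F' and 'a'-'f', adding 9 makes the low four bits equal the hex
+  // digit value (e.g. 'a' is 0x61; 0x61 + 9 = 0x6a, low nibble 10).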
   if (x > '9') {
     x += 9;
   }
@@ -121,27 +121,29 @@
         case '7': {
           // octal digit: 1 to 3 digits
           const char* octal_start = p;
-          unsigned int ch = *p - '0';
-          if (p < last_byte && is_octal_digit(p[1])) ch = ch * 8 + *++p - '0';
+          unsigned int ch = static_cast<unsigned int>(*p - '0');  // digit 1
           if (p < last_byte && is_octal_digit(p[1]))
-            ch = ch * 8 + *++p - '0';      // now points at last digit
+            ch = ch * 8 + static_cast<unsigned int>(*++p - '0');  // digit 2
+          if (p < last_byte && is_octal_digit(p[1]))
+            ch = ch * 8 + static_cast<unsigned int>(*++p - '0');  // digit 3
           if (ch > 0xff) {
             if (error) {
               *error = "Value of \\" +
-                       std::string(octal_start, p + 1 - octal_start) +
+                       std::string(octal_start,
+                                   static_cast<size_t>(p + 1 - octal_start)) +
                        " exceeds 0xff";
             }
             return false;
           }
           if ((ch == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
-            const ptrdiff_t octal_size = p + 1 - octal_start;
+            const size_t octal_size = static_cast<size_t>(p + 1 - octal_start);
             *d++ = '\\';
-            memcpy(d, octal_start, octal_size);
+            memmove(d, octal_start, octal_size);
             d += octal_size;
             break;
           }
-          *d++ = ch;
+          *d++ = static_cast<char>(ch);
           break;
         }
         case 'x':
@@ -149,32 +151,34 @@
           if (p >= last_byte) {
             if (error) *error = "String cannot end with \\x";
             return false;
-          } else if (!absl::ascii_isxdigit(p[1])) {
+          } else if (!absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
             if (error) *error = "\\x cannot be followed by a non-hex digit";
             return false;
           }
           unsigned int ch = 0;
           const char* hex_start = p;
-          while (p < last_byte && absl::ascii_isxdigit(p[1]))
+          while (p < last_byte &&
+                 absl::ascii_isxdigit(static_cast<unsigned char>(p[1])))
             // Arbitrarily many hex digits
             ch = (ch << 4) + hex_digit_to_int(*++p);
           if (ch > 0xFF) {
             if (error) {
               *error = "Value of \\" +
-                       std::string(hex_start, p + 1 - hex_start) +
+                       std::string(hex_start,
+                                   static_cast<size_t>(p + 1 - hex_start)) +
                        " exceeds 0xff";
             }
             return false;
           }
           if ((ch == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
-            const ptrdiff_t hex_size = p + 1 - hex_start;
+            const size_t hex_size = static_cast<size_t>(p + 1 - hex_start);
             *d++ = '\\';
-            memcpy(d, hex_start, hex_size);
+            memmove(d, hex_start, hex_size);
             d += hex_size;
             break;
           }
-          *d++ = ch;
+          *d++ = static_cast<char>(ch);
           break;
         }
         case 'u': {
@@ -184,18 +188,20 @@
           if (p + 4 >= end) {
             if (error) {
               *error = "\\u must be followed by 4 hex digits: \\" +
-                       std::string(hex_start, p + 1 - hex_start);
+                       std::string(hex_start,
+                                   static_cast<size_t>(p + 1 - hex_start));
             }
             return false;
           }
           for (int i = 0; i < 4; ++i) {
             // Look one char ahead.
-            if (absl::ascii_isxdigit(p[1])) {
+            if (absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
               rune = (rune << 4) + hex_digit_to_int(*++p);  // Advance p.
             } else {
               if (error) {
                 *error = "\\u must be followed by 4 hex digits: \\" +
-                         std::string(hex_start, p + 1 - hex_start);
+                         std::string(hex_start,
+                                     static_cast<size_t>(p + 1 - hex_start));
               }
               return false;
             }
@@ -203,7 +209,7 @@
           if ((rune == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
             *d++ = '\\';
-            memcpy(d, hex_start, 5);  // u0000
+            memmove(d, hex_start, 5);  // u0000
             d += 5;
             break;
           }
@@ -220,20 +226,22 @@
           if (p + 8 >= end) {
             if (error) {
               *error = "\\U must be followed by 8 hex digits: \\" +
-                       std::string(hex_start, p + 1 - hex_start);
+                       std::string(hex_start,
+                                   static_cast<size_t>(p + 1 - hex_start));
             }
             return false;
           }
           for (int i = 0; i < 8; ++i) {
             // Look one char ahead.
-            if (absl::ascii_isxdigit(p[1])) {
+            if (absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
               // Don't change rune until we're sure this
               // is within the Unicode limit, but do advance p.
               uint32_t newrune = (rune << 4) + hex_digit_to_int(*++p);
               if (newrune > 0x10FFFF) {
                 if (error) {
                   *error = "Value of \\" +
-                           std::string(hex_start, p + 1 - hex_start) +
+                           std::string(hex_start,
+                                       static_cast<size_t>(p + 1 - hex_start)) +
                            " exceeds Unicode limit (0x10FFFF)";
                 }
                 return false;
@@ -243,7 +251,8 @@
             } else {
               if (error) {
                 *error = "\\U must be followed by 8 hex digits: \\" +
-                         std::string(hex_start, p + 1 - hex_start);
+                         std::string(hex_start,
+                                     static_cast<size_t>(p + 1 - hex_start));
               }
               return false;
             }
@@ -251,7 +260,7 @@
           if ((rune == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
             *d++ = '\\';
-            memcpy(d, hex_start, 9);  // U00000000
+            memmove(d, hex_start, 9);  // U00000000
             d += 9;
             break;
           }
@@ -291,7 +300,7 @@
                          error)) {
     return false;
   }
-  dest->erase(dest_size);
+  dest->erase(static_cast<size_t>(dest_size));
   return true;
 }
 
@@ -311,7 +320,7 @@
   std::string dest;
   bool last_hex_escape = false;  // true if last output char was \xNN.
 
-  for (unsigned char c : src) {
+  for (char c : src) {
     bool is_hex_escape = false;
     switch (c) {
       case '\n': dest.append("\\" "n"); break;
@@ -320,28 +329,30 @@
       case '\"': dest.append("\\" "\""); break;
       case '\'': dest.append("\\" "'"); break;
       case '\\': dest.append("\\" "\\"); break;
-      default:
+      default: {
         // Note that if we emit \xNN and the src character after that is a hex
         // digit then that digit must be escaped too to prevent it being
         // interpreted as part of the character code by C.
-        if ((!utf8_safe || c < 0x80) &&
-            (!absl::ascii_isprint(c) ||
-             (last_hex_escape && absl::ascii_isxdigit(c)))) {
+        const unsigned char uc = static_cast<unsigned char>(c);
+        if ((!utf8_safe || uc < 0x80) &&
+            (!absl::ascii_isprint(uc) ||
+             (last_hex_escape && absl::ascii_isxdigit(uc)))) {
           if (use_hex) {
             dest.append("\\" "x");
-            dest.push_back(numbers_internal::kHexChar[c / 16]);
-            dest.push_back(numbers_internal::kHexChar[c % 16]);
+            dest.push_back(numbers_internal::kHexChar[uc / 16]);
+            dest.push_back(numbers_internal::kHexChar[uc % 16]);
             is_hex_escape = true;
           } else {
             dest.append("\\");
-            dest.push_back(numbers_internal::kHexChar[c / 64]);
-            dest.push_back(numbers_internal::kHexChar[(c % 64) / 8]);
-            dest.push_back(numbers_internal::kHexChar[c % 8]);
+            dest.push_back(numbers_internal::kHexChar[uc / 64]);
+            dest.push_back(numbers_internal::kHexChar[(uc % 64) / 8]);
+            dest.push_back(numbers_internal::kHexChar[uc % 8]);
           }
         } else {
           dest.push_back(c);
           break;
         }
+      }
     }
     last_hex_escape = is_hex_escape;
   }
@@ -350,7 +361,7 @@
 }
 
 /* clang-format off */
-constexpr char c_escaped_len[256] = {
+constexpr unsigned char c_escaped_len[256] = {
     4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 2, 4, 4,  // \t, \n, \r
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,  // ", '
@@ -375,7 +386,8 @@
 // that UTF-8 bytes are not handled specially.
 inline size_t CEscapedLength(absl::string_view src) {
   size_t escaped_len = 0;
-  for (unsigned char c : src) escaped_len += c_escaped_len[c];
+  for (char c : src)
+    escaped_len += c_escaped_len[static_cast<unsigned char>(c)];
   return escaped_len;
 }
 
@@ -391,8 +403,8 @@
                                                  cur_dest_len + escaped_len);
   char* append_ptr = &(*dest)[cur_dest_len];
 
-  for (unsigned char c : src) {
-    int char_len = c_escaped_len[c];
+  for (char c : src) {
+    size_t char_len = c_escaped_len[static_cast<unsigned char>(c)];
     if (char_len == 1) {
       *append_ptr++ = c;
     } else if (char_len == 2) {
@@ -424,13 +436,15 @@
       }
     } else {
       *append_ptr++ = '\\';
-      *append_ptr++ = '0' + c / 64;
-      *append_ptr++ = '0' + (c % 64) / 8;
-      *append_ptr++ = '0' + c % 8;
+      *append_ptr++ = '0' + static_cast<unsigned char>(c) / 64;
+      *append_ptr++ = '0' + (static_cast<unsigned char>(c) % 64) / 8;
+      *append_ptr++ = '0' + static_cast<unsigned char>(c) % 8;
     }
   }
 }
 
+// Reverses the mapping in Base64EscapeInternal; see that method's
+// documentation for details of the mapping.
 bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
                             size_t szdest, const signed char* unbase64,
                             size_t* len) {
@@ -440,7 +454,7 @@
   size_t destidx = 0;
   int decode = 0;
   int state = 0;
-  unsigned int ch = 0;
+  unsigned char ch = 0;
   unsigned int temp = 0;
 
   // If "char" is signed by default, using *src as an array index results in
@@ -500,13 +514,13 @@
         // how to handle those cases.
 
         GET_INPUT(first, 4);
-        temp = decode;
+        temp = static_cast<unsigned char>(decode);
         GET_INPUT(second, 3);
-        temp = (temp << 6) | decode;
+        temp = (temp << 6) | static_cast<unsigned char>(decode);
         GET_INPUT(third, 2);
-        temp = (temp << 6) | decode;
+        temp = (temp << 6) | static_cast<unsigned char>(decode);
         GET_INPUT(fourth, 1);
-        temp = (temp << 6) | decode;
+        temp = (temp << 6) | static_cast<unsigned char>(decode);
       } else {
         // We really did have four good data bytes, so advance four
         // characters in the string.
@@ -518,11 +532,11 @@
       // temp has 24 bits of input, so write that out as three bytes.
 
       if (destidx + 3 > szdest) return false;
-      dest[destidx + 2] = temp;
+      dest[destidx + 2] = static_cast<char>(temp);
       temp >>= 8;
-      dest[destidx + 1] = temp;
+      dest[destidx + 1] = static_cast<char>(temp);
       temp >>= 8;
-      dest[destidx] = temp;
+      dest[destidx] = static_cast<char>(temp);
       destidx += 3;
     }
   } else {
@@ -583,18 +597,18 @@
       }
 
       // Each input character gives us six bits of output.
-      temp = (temp << 6) | decode;
+      temp = (temp << 6) | static_cast<unsigned char>(decode);
       ++state;
       if (state == 4) {
         // If we've accumulated 24 bits of output, write that out as
         // three bytes.
         if (dest) {
           if (destidx + 3 > szdest) return false;
-          dest[destidx + 2] = temp;
+          dest[destidx + 2] = static_cast<char>(temp);
           temp >>= 8;
-          dest[destidx + 1] = temp;
+          dest[destidx + 1] = static_cast<char>(temp);
           temp >>= 8;
-          dest[destidx] = temp;
+          dest[destidx] = static_cast<char>(temp);
         }
         destidx += 3;
         state = 0;
@@ -619,7 +633,7 @@
       if (dest) {
         if (destidx + 1 > szdest) return false;
         temp >>= 4;
-        dest[destidx] = temp;
+        dest[destidx] = static_cast<char>(temp);
       }
       ++destidx;
       expected_equals = 2;
@@ -630,9 +644,9 @@
       if (dest) {
         if (destidx + 2 > szdest) return false;
         temp >>= 2;
-        dest[destidx + 1] = temp;
+        dest[destidx + 1] = static_cast<char>(temp);
         temp >>= 8;
-        dest[destidx] = temp;
+        dest[destidx] = static_cast<char>(temp);
       }
       destidx += 2;
       expected_equals = 1;
@@ -664,7 +678,10 @@
   return ok;
 }
 
-// The arrays below were generated by the following code
+// The arrays below map base64-escaped characters back to their original values.
+// For the inverse case, see k(WebSafe)Base64Chars in the internal
+// escaping.cc.
+// These arrays were generated by the following inversion code:
 // #include <sys/time.h>
 // #include <stdlib.h>
 // #include <string.h>
@@ -691,8 +708,8 @@
 //   }
 // }
 //
-// where the value of "Base64[]" was replaced by one of the base-64 conversion
-// tables from the functions below.
+// where the value of "Base64[]" was replaced by one of k(WebSafe)Base64Chars
+// in the internal escaping.cc.
 /* clang-format off */
 constexpr signed char kUnBase64[] = {
     -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
@@ -765,15 +782,11 @@
 };
 /* clang-format on */
 
-constexpr char kWebSafeBase64Chars[] =
-    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
 template <typename String>
 bool Base64UnescapeInternal(const char* src, size_t slen, String* dest,
                             const signed char* unbase64) {
   // Determine the size of the output string.  Base64 encodes every 3 bytes into
-  // 4 characters.  any leftover chars are added directly for good measure.
-  // This is documented in the base64 RFC: http://tools.ietf.org/html/rfc3548
+  // 4 characters.  Any leftover chars are added directly for good measure.
   const size_t dest_len = 3 * (slen / 4) + (slen % 4);
 
   strings_internal::STLStringResizeUninitialized(dest, dest_len);
@@ -821,9 +834,9 @@
 // or a string.  This works because we use the [] operator to access
 // individual characters at a time.
 template <typename T>
-void HexStringToBytesInternal(const char* from, T to, ptrdiff_t num) {
-  for (int i = 0; i < num; i++) {
-    to[i] = (kHexValueLenient[from[i * 2] & 0xFF] << 4) +
+void HexStringToBytesInternal(const char* from, T to, size_t num) {
+  for (size_t i = 0; i < num; i++) {
+    to[i] = static_cast<char>(kHexValueLenient[from[i * 2] & 0xFF] << 4) +
             (kHexValueLenient[from[i * 2 + 1] & 0xFF]);
   }
 }
@@ -831,7 +844,7 @@
 // This is a templated function so that T can be either a char* or a
 // std::string.
 template <typename T>
-void BytesToHexStringInternal(const unsigned char* src, T dest, ptrdiff_t num) {
+void BytesToHexStringInternal(const unsigned char* src, T dest, size_t num) {
   auto dest_ptr = &dest[0];
   for (auto src_ptr = src; src_ptr != (src + num); ++src_ptr, dest_ptr += 2) {
     const char* hex_p = &numbers_internal::kHexTable[*src_ptr * 2];
@@ -869,30 +882,6 @@
   return CEscapeInternal(src, true, true);
 }
 
-// ----------------------------------------------------------------------
-// Base64Unescape() - base64 decoder
-// Base64Escape() - base64 encoder
-// WebSafeBase64Unescape() - Google's variation of base64 decoder
-// WebSafeBase64Escape() - Google's variation of base64 encoder
-//
-// Check out
-// http://tools.ietf.org/html/rfc2045 for formal description, but what we
-// care about is that...
-//   Take the encoded stuff in groups of 4 characters and turn each
-//   character into a code 0 to 63 thus:
-//           A-Z map to 0 to 25
-//           a-z map to 26 to 51
-//           0-9 map to 52 to 61
-//           +(- for WebSafe) maps to 62
-//           /(_ for WebSafe) maps to 63
-//   There will be four numbers, all less than 64 which can be represented
-//   by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
-//   Arrange the 6 digit binary numbers into three bytes as such:
-//   aaaaaabb bbbbcccc ccdddddd
-//   Equals signs (one or two) are used at the end of the encoded block to
-//   indicate that the text was not an integer multiple of three bytes long.
-// ----------------------------------------------------------------------
-
 bool Base64Unescape(absl::string_view src, std::string* dest) {
   return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
 }
@@ -910,7 +899,7 @@
 void WebSafeBase64Escape(absl::string_view src, std::string* dest) {
   strings_internal::Base64EscapeInternal(
       reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
-      false, kWebSafeBase64Chars);
+      false, strings_internal::kWebSafeBase64Chars);
 }
 
 std::string Base64Escape(absl::string_view src) {
@@ -925,7 +914,7 @@
   std::string dest;
   strings_internal::Base64EscapeInternal(
       reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
-      false, kWebSafeBase64Chars);
+      false, strings_internal::kWebSafeBase64Chars);
   return dest;
 }
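As a worked illustration of the 6-bit packing performed by Base64UnescapeInternal above (the repeated `temp = (temp << 6) | decode` steps), here is a minimal standalone decode of the group "TWFu"; the sextet values 19, 22, 5, 46 are what the kUnBase64 table yields for 'T', 'W', 'F', 'u':

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t temp = 19;       // 'T'
  temp = (temp << 6) | 22;  // 'W'
  temp = (temp << 6) | 5;   // 'F'
  temp = (temp << 6) | 46;  // 'u'  -> temp == 0x4D616E (24 bits)
  // Unpack the 24 bits as three bytes, exactly as the decoder writes dest[].
  const char out[4] = {static_cast<char>(temp >> 16),
                       static_cast<char>((temp >> 8) & 0xFF),
                       static_cast<char>(temp & 0xFF), '\0'};
  std::printf("%s\n", out);  // prints "Man"
}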
 
diff --git a/abseil-cpp/absl/strings/escaping.h b/abseil-cpp/absl/strings/escaping.h
index f5ca26c..bf2a589 100644
--- a/abseil-cpp/absl/strings/escaping.h
+++ b/abseil-cpp/absl/strings/escaping.h
@@ -117,35 +117,40 @@
 // conversion.
 std::string Utf8SafeCHexEscape(absl::string_view src);
 
-// Base64Unescape()
-//
-// Converts a `src` string encoded in Base64 to its binary equivalent, writing
-// it to a `dest` buffer, returning `true` on success. If `src` contains invalid
-// characters, `dest` is cleared and returns `false`.
-bool Base64Unescape(absl::string_view src, std::string* dest);
-
-// WebSafeBase64Unescape()
-//
-// Converts a `src` string encoded in Base64 to its binary equivalent, writing
-// it to a `dest` buffer, but using '-' instead of '+', and '_' instead of '/'.
-// If `src` contains invalid characters, `dest` is cleared and returns `false`.
-bool WebSafeBase64Unescape(absl::string_view src, std::string* dest);
-
 // Base64Escape()
 //
-// Encodes a `src` string into a base64-encoded string, with padding characters.
-// This function conforms with RFC 4648 section 4 (base64).
+// Encodes a `src` string into a base64-encoded `dest` string with padding
+// characters. This function conforms with RFC 4648 section 4 (base64) and RFC
+// 2045.
 void Base64Escape(absl::string_view src, std::string* dest);
 std::string Base64Escape(absl::string_view src);
 
 // WebSafeBase64Escape()
 //
-// Encodes a `src` string into a base64-like string, using '-' instead of '+'
-// and '_' instead of '/', and without padding. This function conforms with RFC
-// 4648 section 5 (base64url).
+// Encodes a `src` string into a base64 string, like Base64Escape() does, but
+// outputs '-' instead of '+' and '_' instead of '/', and does not pad `dest`.
+// This function conforms with RFC 4648 section 5 (base64url).
 void WebSafeBase64Escape(absl::string_view src, std::string* dest);
 std::string WebSafeBase64Escape(absl::string_view src);
 
+// Base64Unescape()
+//
+// Converts a `src` string encoded in Base64 (RFC 4648 section 4) to its binary
+// equivalent, writing it to a `dest` buffer, returning `true` on success. If
+// `src` contains invalid characters, `dest` is cleared and returns `false`.
+// If padding is included (note that `Base64Escape()` does produce it), it must
+// be correct. In the padding, '=' and '.' are treated identically.
+bool Base64Unescape(absl::string_view src, std::string* dest);
+
+// WebSafeBase64Unescape()
+//
+// Converts a `src` string encoded in "web safe" Base64 (RFC 4648 section 5) to
+// its binary equivalent, writing it to a `dest` buffer. If `src` contains
+// invalid characters, `dest` is cleared and returns `false`. If padding is
+// included (note that `WebSafeBase64Escape()` does not produce it), it must be
+// correct. In the padding, '=' and '.' are treated identically.
+bool WebSafeBase64Unescape(absl::string_view src, std::string* dest);
+
 // HexStringToBytes()
 //
 // Converts an ASCII hex string into bytes, returning binary data of length
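A short usage sketch of the Base64 functions documented in this header above (the function names and signatures are the public ones declared here; the round-trip expectation follows directly from the documentation):

#include <string>
#include "absl/strings/escaping.h"

// Returns true if `input` survives both a padded and a web-safe round trip.
bool Base64RoundTrips(const std::string& input) {
  const std::string padded = absl::Base64Escape(input);          // RFC 4648 s4
  const std::string websafe = absl::WebSafeBase64Escape(input);  // RFC 4648 s5
  std::string decoded_padded, decoded_websafe;
  return absl::Base64Unescape(padded, &decoded_padded) &&
         absl::WebSafeBase64Unescape(websafe, &decoded_websafe) &&
         decoded_padded == input && decoded_websafe == input;
}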
diff --git a/abseil-cpp/absl/strings/escaping_test.cc b/abseil-cpp/absl/strings/escaping_test.cc
index 45671a0..9f62c1e 100644
--- a/abseil-cpp/absl/strings/escaping_test.cc
+++ b/abseil-cpp/absl/strings/escaping_test.cc
@@ -562,6 +562,7 @@
 void TestEscapeAndUnescape() {
   // Check the short strings; this tests the math (and boundaries)
   for (const auto& tc : base64_tests) {
+    // Test plain base64.
     StringType encoded("this junk should be ignored");
     absl::Base64Escape(tc.plaintext, &encoded);
     EXPECT_EQ(encoded, tc.cyphertext);
@@ -571,22 +572,26 @@
     EXPECT_TRUE(absl::Base64Unescape(encoded, &decoded));
     EXPECT_EQ(decoded, tc.plaintext);
 
-    StringType websafe(tc.cyphertext);
-    for (int c = 0; c < websafe.size(); ++c) {
-      if ('+' == websafe[c]) websafe[c] = '-';
-      if ('/' == websafe[c]) websafe[c] = '_';
+    StringType websafe_with_padding(tc.cyphertext);
+    for (unsigned int c = 0; c < websafe_with_padding.size(); ++c) {
+      if ('+' == websafe_with_padding[c]) websafe_with_padding[c] = '-';
+      if ('/' == websafe_with_padding[c]) websafe_with_padding[c] = '_';
+      // Intentionally keep the padding (i.e. '=').
+    }
+
+    // Test plain websafe (i.e. without padding).
+    StringType websafe(websafe_with_padding);
+    for (unsigned int c = 0; c < websafe.size(); ++c) {
       if ('=' == websafe[c]) {
         websafe.resize(c);
         break;
       }
     }
-
     encoded = "this junk should be ignored";
     absl::WebSafeBase64Escape(tc.plaintext, &encoded);
     EXPECT_EQ(encoded, websafe);
     EXPECT_EQ(absl::WebSafeBase64Escape(tc.plaintext), websafe);
 
-    // Let's try the string version of the decoder
     decoded = "this junk should be ignored";
     EXPECT_TRUE(absl::WebSafeBase64Unescape(websafe, &decoded));
     EXPECT_EQ(decoded, tc.plaintext);
@@ -617,6 +622,48 @@
   TestEscapeAndUnescape<std::string>();
 }
 
+TEST(Base64, Padding) {
+  // Padding is optional.
+  // '.' is an acceptable padding character, just like '='.
+  std::initializer_list<absl::string_view> good_padding = {
+    "YQ",
+    "YQ==",
+    "YQ=.",
+    "YQ.=",
+    "YQ..",
+  };
+  for (absl::string_view b64 : good_padding) {
+    std::string decoded;
+    EXPECT_TRUE(absl::Base64Unescape(b64, &decoded));
+    EXPECT_EQ(decoded, "a");
+    std::string websafe_decoded;
+    EXPECT_TRUE(absl::WebSafeBase64Unescape(b64, &websafe_decoded));
+    EXPECT_EQ(websafe_decoded, "a");
+  }
+  std::initializer_list<absl::string_view> bad_padding = {
+    "YQ=",
+    "YQ.",
+    "YQ===",
+    "YQ==.",
+    "YQ=.=",
+    "YQ=..",
+    "YQ.==",
+    "YQ.=.",
+    "YQ..=",
+    "YQ...",
+    "YQ====",
+    "YQ....",
+    "YQ=====",
+    "YQ.....",
+  };
+  for (absl::string_view b64 : bad_padding) {
+    std::string decoded;
+    EXPECT_FALSE(absl::Base64Unescape(b64, &decoded));
+    std::string websafe_decoded;
+    EXPECT_FALSE(absl::WebSafeBase64Unescape(b64, &websafe_decoded));
+  }
+}
+
 TEST(Base64, DISABLED_HugeData) {
   const size_t kSize = size_t(3) * 1000 * 1000 * 1000;
   static_assert(kSize % 3 == 0, "kSize must be divisible by 3");
diff --git a/abseil-cpp/absl/strings/internal/char_map.h b/abseil-cpp/absl/strings/internal/char_map.h
index 61484de..70a9034 100644
--- a/abseil-cpp/absl/strings/internal/char_map.h
+++ b/abseil-cpp/absl/strings/internal/char_map.h
@@ -73,10 +73,10 @@
   }
 
   // Containing all the chars in the C-string 's'.
-  // Note that this is expensively recursive because of the C++11 constexpr
-  // formulation. Use only in constexpr initializers.
   static constexpr Charmap FromString(const char* s) {
-    return *s == 0 ? Charmap() : (Char(*s) | FromString(s + 1));
+    Charmap ret;
+    while (*s) ret = ret | Char(*s++);
+    return ret;
   }
 
   // Containing all the chars in the closed interval [lo,hi].
@@ -103,10 +103,9 @@
   constexpr Charmap(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3)
       : m_{b0, b1, b2, b3} {}
 
-  static constexpr uint64_t RangeForWord(unsigned char lo, unsigned char hi,
-                                         uint64_t word) {
-    return OpenRangeFromZeroForWord(hi + 1, word) &
-           ~OpenRangeFromZeroForWord(lo, word);
+  static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) {
+    return OpenRangeFromZeroForWord(static_cast<unsigned char>(hi) + 1, word) &
+           ~OpenRangeFromZeroForWord(static_cast<unsigned char>(lo), word);
   }
 
   // All the chars in the specified word of the range [0, upper).
@@ -119,13 +118,16 @@
                      : (~static_cast<uint64_t>(0) >> (64 - upper % 64));
   }
 
-  static constexpr uint64_t CharMaskForWord(unsigned char x, uint64_t word) {
-    return (x / 64 == word) ? (static_cast<uint64_t>(1) << (x % 64)) : 0;
+  static constexpr uint64_t CharMaskForWord(char x, uint64_t word) {
+    const auto unsigned_x = static_cast<unsigned char>(x);
+    return (unsigned_x / 64 == word)
+               ? (static_cast<uint64_t>(1) << (unsigned_x % 64))
+               : 0;
   }
 
- private:
-  void SetChar(unsigned char c) {
-    m_[c / 64] |= static_cast<uint64_t>(1) << (c % 64);
+  void SetChar(char c) {
+    const auto unsigned_c = static_cast<unsigned char>(c);
+    m_[unsigned_c / 64] |= static_cast<uint64_t>(1) << (unsigned_c % 64);
   }
 
   uint64_t m_[4];
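The loop-based rewrite of Charmap::FromString above relies on C++14 relaxed constexpr; a minimal standalone analogue of the same technique (names here are illustrative, not part of Abseil) looks like this:

#include <cstdint>

struct TinyCharSet {
  uint64_t words[4] = {0, 0, 0, 0};

  // Build the 256-bit membership set with an ordinary loop; in C++14 this is
  // still usable in constant expressions, without deep recursion.
  static constexpr TinyCharSet FromString(const char* s) {
    TinyCharSet ret;
    while (*s) {
      const unsigned char c = static_cast<unsigned char>(*s++);
      ret.words[c / 64] |= uint64_t{1} << (c % 64);
    }
    return ret;
  }

  constexpr bool contains(char c) const {
    const unsigned char uc = static_cast<unsigned char>(c);
    return ((words[uc / 64] >> (uc % 64)) & 1) != 0;
  }
};

constexpr TinyCharSet kVowels = TinyCharSet::FromString("aeiou");
static_assert(kVowels.contains('e') && !kVowels.contains('z'),
              "loop-based constexpr construction works");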
diff --git a/abseil-cpp/absl/strings/internal/charconv_bigint.cc b/abseil-cpp/absl/strings/internal/charconv_bigint.cc
index ebf8c07..46b5289 100644
--- a/abseil-cpp/absl/strings/internal/charconv_bigint.cc
+++ b/abseil-cpp/absl/strings/internal/charconv_bigint.cc
@@ -242,7 +242,7 @@
       // decimal exponent to compensate.
       --exponent_adjust;
     }
-    int digit = (*begin - '0');
+    char digit = (*begin - '0');
     --significant_digits;
     if (significant_digits == 0 && std::next(begin) != end &&
         (digit == 0 || digit == 5)) {
@@ -255,7 +255,7 @@
       // 500000...000000000001 to correctly round up, rather than to nearest.
       ++digit;
     }
-    queued = 10 * queued + digit;
+    queued = 10 * queued + static_cast<uint32_t>(digit);
     ++digits_queued;
     if (digits_queued == kMaxSmallPowerOfTen) {
       MultiplyBy(kTenToNth[kMaxSmallPowerOfTen]);
@@ -296,10 +296,8 @@
         std::min(n / kLargePowerOfFiveStep, kLargestPowerOfFiveIndex);
     if (first_pass) {
       // just copy, rather than multiplying by 1
-      std::copy(
-          LargePowerOfFiveData(big_power),
-          LargePowerOfFiveData(big_power) + LargePowerOfFiveSize(big_power),
-          answer.words_);
+      std::copy_n(LargePowerOfFiveData(big_power),
+                  LargePowerOfFiveSize(big_power), answer.words_);
       answer.size_ = LargePowerOfFiveSize(big_power);
       first_pass = false;
     } else {
@@ -341,8 +339,8 @@
   std::string result;
   // Build result in reverse order
   while (copy.size() > 0) {
-    int next_digit = copy.DivMod<10>();
-    result.push_back('0' + next_digit);
+    uint32_t next_digit = copy.DivMod<10>();
+    result.push_back('0' + static_cast<char>(next_digit));
   }
   if (result.empty()) {
     result.push_back('0');
diff --git a/abseil-cpp/absl/strings/internal/charconv_bigint.h b/abseil-cpp/absl/strings/internal/charconv_bigint.h
index 8f70297..5c0c375 100644
--- a/abseil-cpp/absl/strings/internal/charconv_bigint.h
+++ b/abseil-cpp/absl/strings/internal/charconv_bigint.h
@@ -92,7 +92,7 @@
   // numbers with this many decimal digits or fewer are representable by this
   // type.
   //
-  // Analagous to std::numeric_limits<BigUnsigned>::digits10.
+  // Analogous to std::numeric_limits<BigUnsigned>::digits10.
   static constexpr int Digits10() {
     // 9975007/1035508 is very slightly less than log10(2**32).
     return static_cast<uint64_t>(max_words) * 9975007 / 1035508;
@@ -121,7 +121,7 @@
           ++size_;
         }
       }
-      std::fill(words_, words_ + word_shift, 0u);
+      std::fill_n(words_, word_shift, 0u);
     }
   }
 
@@ -197,7 +197,7 @@
   }
 
   void SetToZero() {
-    std::fill(words_, words_ + size_, 0u);
+    std::fill_n(words_, size_, 0u);
     size_ = 0;
   }
 
diff --git a/abseil-cpp/absl/strings/internal/charconv_bigint_test.cc b/abseil-cpp/absl/strings/internal/charconv_bigint_test.cc
index 363bcb0..a8b9945 100644
--- a/abseil-cpp/absl/strings/internal/charconv_bigint_test.cc
+++ b/abseil-cpp/absl/strings/internal/charconv_bigint_test.cc
@@ -69,6 +69,61 @@
     // And we should have fully rotated all bits off by now:
     EXPECT_EQ(a, BigUnsigned<84>(0u));
   }
+  {
+    // Bit shifting large and small numbers by large and small offsets.
+    // Intended to exercise bounds-checking corner on ShiftLeft() (directly
+    // and under asan).
+
+    // 2**(32*84)-1
+    const BigUnsigned<84> all_bits_one(
+        "1474444211396924248063325089479706787923460402125687709454567433186613"
+        "6228083464060749874845919674257665016359189106695900028098437021384227"
+        "3285029708032466536084583113729486015826557532750465299832071590813090"
+        "2011853039837649252477307070509704043541368002938784757296893793903797"
+        "8180292336310543540677175225040919704702800559606097685920595947397024"
+        "8303316808753252115729411497720357971050627997031988036134171378490368"
+        "6008000778741115399296162550786288457245180872759047016734959330367829"
+        "5235612397427686310674725251378116268607113017720538636924549612987647"
+        "5767411074510311386444547332882472126067840027882117834454260409440463"
+        "9345147252664893456053258463203120637089916304618696601333953616715125"
+        "2115882482473279040772264257431663818610405673876655957323083702713344"
+        "4201105427930770976052393421467136557055");
+    const BigUnsigned<84> zero(0u);
+    const BigUnsigned<84> one(1u);
+    // In-bounds shifts.
+    for (int i = 1; i < 84 * 32; ++i) {
+      // Shifting all_bits_one to the left should result in a smaller number,
+      // since the high bits shift off and the low bits are replaced with
+      // zeroes.
+      BigUnsigned<84> big_shifted = all_bits_one;
+      big_shifted.ShiftLeft(i);
+      EXPECT_GT(all_bits_one, big_shifted);
+      // Shifting 1 to the left should instead result in a larger number.
+      BigUnsigned<84> small_shifted = one;
+      small_shifted.ShiftLeft(i);
+      EXPECT_LT(one, small_shifted);
+    }
+    // Shifting by zero or a negative number has no effect
+    for (int no_op_shift : {0, -1, -84 * 32, std::numeric_limits<int>::min()}) {
+      BigUnsigned<84> big_shifted = all_bits_one;
+      big_shifted.ShiftLeft(no_op_shift);
+      EXPECT_EQ(all_bits_one, big_shifted);
+      BigUnsigned<84> small_shifted = one;
+      small_shifted.ShiftLeft(no_op_shift);
+      EXPECT_EQ(one, small_shifted);
+    }
+    // Shifting by an amount greater than the number of bits should result in
+    // zero.
+    for (int out_of_bounds_shift :
+         {84 * 32, 84 * 32 + 1, std::numeric_limits<int>::max()}) {
+      BigUnsigned<84> big_shifted = all_bits_one;
+      big_shifted.ShiftLeft(out_of_bounds_shift);
+      EXPECT_EQ(zero, big_shifted);
+      BigUnsigned<84> small_shifted = one;
+      small_shifted.ShiftLeft(out_of_bounds_shift);
+      EXPECT_EQ(zero, small_shifted);
+    }
+  }
 }
 
 TEST(BigUnsigned, MultiplyByUint32) {
diff --git a/abseil-cpp/absl/strings/internal/charconv_parse.cc b/abseil-cpp/absl/strings/internal/charconv_parse.cc
index fd6d948..98823de 100644
--- a/abseil-cpp/absl/strings/internal/charconv_parse.cc
+++ b/abseil-cpp/absl/strings/internal/charconv_parse.cc
@@ -52,7 +52,7 @@
 
 // The lowest valued 19-digit decimal mantissa we can read still contains
 // sufficient information to reconstruct a binary mantissa.
-static_assert(1000000000000000000u > (uint64_t(1) << (53 + 3)), "(b) above");
+static_assert(1000000000000000000u > (uint64_t{1} << (53 + 3)), "(b) above");
 
 // ParseFloat<16> will read the first 15 significant digits of the mantissa.
 //
@@ -190,11 +190,11 @@
 
 template <>
 unsigned ToDigit<10>(char ch) {
-  return ch - '0';
+  return static_cast<unsigned>(ch - '0');
 }
 template <>
 unsigned ToDigit<16>(char ch) {
-  return kAsciiToInt[static_cast<unsigned char>(ch)];
+  return static_cast<unsigned>(kAsciiToInt[static_cast<unsigned char>(ch)]);
 }
 
 template <>
@@ -246,8 +246,8 @@
 // ConsumeDigits does not protect against overflow on *out; max_digits must
 // be chosen with respect to type T to avoid the possibility of overflow.
 template <int base, typename T>
-std::size_t ConsumeDigits(const char* begin, const char* end, int max_digits,
-                          T* out, bool* dropped_nonzero_digit) {
+int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out,
+                  bool* dropped_nonzero_digit) {
   if (base == 10) {
     assert(max_digits <= std::numeric_limits<T>::digits10);
   } else if (base == 16) {
@@ -282,7 +282,7 @@
     *dropped_nonzero_digit = true;
   }
   *out = accumulator;
-  return begin - original_begin;
+  return static_cast<int>(begin - original_begin);
 }
 
 // Returns true if `v` is one of the chars allowed inside parentheses following
@@ -372,7 +372,7 @@
 
   int exponent_adjustment = 0;
   bool mantissa_is_inexact = false;
-  std::size_t pre_decimal_digits = ConsumeDigits<base>(
+  int pre_decimal_digits = ConsumeDigits<base>(
       begin, end, MantissaDigitsMax<base>(), &mantissa, &mantissa_is_inexact);
   begin += pre_decimal_digits;
   int digits_left;
@@ -398,14 +398,14 @@
       while (begin < end && *begin == '0') {
         ++begin;
       }
-      std::size_t zeros_skipped = begin - begin_zeros;
+      int zeros_skipped = static_cast<int>(begin - begin_zeros);
       if (zeros_skipped >= DigitLimit<base>()) {
         // refuse to parse pathological inputs
         return result;
       }
       exponent_adjustment -= static_cast<int>(zeros_skipped);
     }
-    std::size_t post_decimal_digits = ConsumeDigits<base>(
+    int post_decimal_digits = ConsumeDigits<base>(
         begin, end, digits_left, &mantissa, &mantissa_is_inexact);
     begin += post_decimal_digits;
 
diff --git a/abseil-cpp/absl/strings/internal/charconv_parse_test.cc b/abseil-cpp/absl/strings/internal/charconv_parse_test.cc
index bc2d111..2b7b082 100644
--- a/abseil-cpp/absl/strings/internal/charconv_parse_test.cc
+++ b/abseil-cpp/absl/strings/internal/charconv_parse_test.cc
@@ -19,7 +19,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/check.h"
 #include "absl/strings/str_cat.h"
 
 using absl::chars_format;
@@ -56,14 +56,14 @@
     begin_subrange = static_cast<int>(open_bracket_pos);
     s.replace(open_bracket_pos, 1, "");
     std::string::size_type close_bracket_pos = s.find(']');
-    ABSL_RAW_CHECK(close_bracket_pos != absl::string_view::npos,
-                   "Test input contains [ without matching ]");
+    CHECK_NE(close_bracket_pos, absl::string_view::npos)
+        << "Test input contains [ without matching ]";
     end_subrange = static_cast<int>(close_bracket_pos);
     s.replace(close_bracket_pos, 1, "");
   }
   const std::string::size_type expected_characters_matched = s.find('$');
-  ABSL_RAW_CHECK(expected_characters_matched != std::string::npos,
-                 "Input string must contain $");
+  CHECK_NE(expected_characters_matched, std::string::npos)
+      << "Input string must contain $";
   s.replace(expected_characters_matched, 1, "");
 
   ParsedFloat parsed =
diff --git a/abseil-cpp/absl/strings/internal/cord_data_edge.h b/abseil-cpp/absl/strings/internal/cord_data_edge.h
new file mode 100644
index 0000000..e18b33e
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_data_edge.h
@@ -0,0 +1,63 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
+#define ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node
+// holding a FLAT or EXTERNAL child rep. Requires `rep != nullptr`.
+inline bool IsDataEdge(const CordRep* edge) {
+  assert(edge != nullptr);
+
+  // The fast path is that `edge` is an EXTERNAL or FLAT node, making the below
+  // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL
+  // check in the slow path of the SUBSTRING check to optimize for the hot path.
+  if (edge->tag == EXTERNAL || edge->tag >= FLAT) return true;
+  if (edge->tag == SUBSTRING) edge = edge->substring()->child;
+  return edge->tag == EXTERNAL || edge->tag >= FLAT;
+}
+
+// Returns the `absl::string_view` data reference for the provided data edge.
+// Requires `IsDataEdge(edge) == true`.
+inline absl::string_view EdgeData(const CordRep* edge) {
+  assert(IsDataEdge(edge));
+
+  size_t offset = 0;
+  const size_t length = edge->length;
+  if (edge->IsSubstring()) {
+    offset = edge->substring()->start;
+    edge = edge->substring()->child;
+  }
+  return edge->tag >= FLAT
+             ? absl::string_view{edge->flat()->Data() + offset, length}
+             : absl::string_view{edge->external()->base + offset, length};
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc b/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc
new file mode 100644
index 0000000..8fce3bc
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc
@@ -0,0 +1,130 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_data_edge.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::absl::cordrep_testing::MakeExternal;
+using ::absl::cordrep_testing::MakeFlat;
+using ::absl::cordrep_testing::MakeSubstring;
+
+TEST(CordDataEdgeTest, IsDataEdgeOnFlat) {
+  CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+  EXPECT_TRUE(IsDataEdge(rep));
+  CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnExternal) {
+  CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ...");
+  EXPECT_TRUE(IsDataEdge(rep));
+  CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfFlat) {
+  CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+  CordRep* substr = MakeSubstring(1, 20, rep);
+  EXPECT_TRUE(IsDataEdge(substr));
+  CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfExternal) {
+  CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ...");
+  CordRep* substr = MakeSubstring(1, 20, rep);
+  EXPECT_TRUE(IsDataEdge(substr));
+  CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnBtree) {
+  CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+  CordRepBtree* tree = CordRepBtree::New(rep);
+  EXPECT_FALSE(IsDataEdge(tree));
+  CordRep::Unref(tree);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnBadSubstr) {
+  CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+  CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep));
+  EXPECT_FALSE(IsDataEdge(substr));
+  CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnFlat) {
+  absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+  CordRep* rep = MakeFlat(value);
+  EXPECT_EQ(EdgeData(rep), value);
+  CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnExternal) {
+  absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+  CordRep* rep = MakeExternal(value);
+  EXPECT_EQ(EdgeData(rep), value);
+  CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnSubstringOfFlat) {
+  absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+  CordRep* rep = MakeFlat(value);
+  CordRep* substr = MakeSubstring(1, 20, rep);
+  EXPECT_EQ(EdgeData(substr), value.substr(1, 20));
+  CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnSubstringOfExternal) {
+  absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+  CordRep* rep = MakeExternal(value);
+  CordRep* substr = MakeSubstring(1, 20, rep);
+  EXPECT_EQ(EdgeData(substr), value.substr(1, 20));
+  CordRep::Unref(substr);
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+
+TEST(CordDataEdgeTest, IsDataEdgeOnNullPtr) {
+  EXPECT_DEATH(IsDataEdge(nullptr), ".*");
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnNullPtr) {
+  EXPECT_DEATH(EdgeData(nullptr), ".*");
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnBtree) {
+  CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+  CordRepBtree* tree = CordRepBtree::New(rep);
+  EXPECT_DEATH(EdgeData(tree), ".*");
+  CordRep::Unref(tree);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnBadSubstr) {
+  CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+  CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep));
+  EXPECT_DEATH(EdgeData(substr), ".*");
+  CordRep::Unref(substr);
+}
+
+#endif  // GTEST_HAS_DEATH_TEST && !NDEBUG
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_internal.cc b/abseil-cpp/absl/strings/internal/cord_internal.cc
new file mode 100644
index 0000000..b787438
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_internal.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/strings/internal/cord_internal.h"
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
+    kCordEnableRingBufferDefault);
+ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
+    kCordShallowSubcordsDefault);
+
+void LogFatalNodeType(CordRep* rep) {
+  ABSL_INTERNAL_LOG(FATAL, absl::StrCat("Unexpected node type: ",
+                                        static_cast<int>(rep->tag)));
+}
+
+void CordRep::Destroy(CordRep* rep) {
+  assert(rep != nullptr);
+
+  while (true) {
+    assert(!rep->refcount.IsImmortal());
+    if (rep->tag == BTREE) {
+      CordRepBtree::Destroy(rep->btree());
+      return;
+    } else if (rep->tag == RING) {
+      CordRepRing::Destroy(rep->ring());
+      return;
+    } else if (rep->tag == EXTERNAL) {
+      CordRepExternal::Delete(rep);
+      return;
+    } else if (rep->tag == SUBSTRING) {
+      CordRepSubstring* rep_substring = rep->substring();
+      rep = rep_substring->child;
+      delete rep_substring;
+      if (rep->refcount.Decrement()) {
+        return;
+      }
+    } else if (rep->tag == CRC) {
+      CordRepCrc::Destroy(rep->crc());
+      return;
+    } else {
+      assert(rep->IsFlat());
+      CordRepFlat::Delete(rep);
+      return;
+    }
+  }
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
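CordRep::Destroy above is deliberately iterative: deleting a SUBSTRING drops a reference on its child, and if that was the last reference the loop continues with the child instead of recursing, so long substring chains cannot overflow the stack. A self-contained (non-Abseil) sketch of the same pattern:

// Illustrative node type; Abseil's real reps use RefcountAndFlags, not an int.
struct Node {
  int refcount = 1;
  Node* child = nullptr;  // Only wrapper-style nodes have a child.
};

void DestroyChain(Node* node) {
  while (node != nullptr) {
    Node* child = node->child;
    delete node;
    // Keep going only if we just released the last reference on the child.
    node = (child != nullptr && --child->refcount == 0) ? child : nullptr;
  }
}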
diff --git a/abseil-cpp/absl/strings/internal/cord_internal.h b/abseil-cpp/absl/strings/internal/cord_internal.h
index d456eef..20dd008 100644
--- a/abseil-cpp/absl/strings/internal/cord_internal.h
+++ b/abseil-cpp/absl/strings/internal/cord_internal.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Abseil Authors.
+// Copyright 2021 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -21,46 +21,162 @@
 #include <cstdint>
 #include <type_traits>
 
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/endian.h"
 #include "absl/base/internal/invoke.h"
+#include "absl/base/optimization.h"
 #include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
 #include "absl/meta/type_traits.h"
 #include "absl/strings/string_view.h"
 
+// We can only add poisoning if we can detect consteval executions.
+#if defined(ABSL_HAVE_CONSTANT_EVALUATED) && \
+    (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+     defined(ABSL_HAVE_MEMORY_SANITIZER))
+#define ABSL_INTERNAL_CORD_HAVE_SANITIZER 1
+#endif
+
+#define ABSL_CORD_INTERNAL_NO_SANITIZE \
+  ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace cord_internal {
 
-// Wraps std::atomic for reference counting.
-class Refcount {
- public:
-  Refcount() : count_{1} {}
-  ~Refcount() {}
+// The overhead of a vtable is too much for Cord, so we roll our own subclasses
+// using only a single byte to differentiate classes from each other - the "tag"
+// byte.  Define the subclasses first so we can provide downcasting helper
+// functions in the base class.
+struct CordRep;
+struct CordRepConcat;
+struct CordRepExternal;
+struct CordRepFlat;
+struct CordRepSubstring;
+struct CordRepCrc;
+class CordRepRing;
+class CordRepBtree;
 
-  // Increments the reference count by 1. Imposes no memory ordering.
-  inline void Increment() { count_.fetch_add(1, std::memory_order_relaxed); }
+class CordzInfo;
+
+// Default feature enable states for cord ring buffers
+enum CordFeatureDefaults {
+  kCordEnableRingBufferDefault = false,
+  kCordShallowSubcordsDefault = false
+};
+
+extern std::atomic<bool> cord_ring_buffer_enabled;
+extern std::atomic<bool> shallow_subcords_enabled;
+
+inline void enable_cord_ring_buffer(bool enable) {
+  cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
+}
+
+inline void enable_shallow_subcords(bool enable) {
+  shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
+}
+
+enum Constants {
+  // The inlined size to use with absl::InlinedVector.
+  //
+  // Note: The InlinedVectors in this file (and in cord.h) do not need to use
+  // the same value for their inlined size. The fact that they do is historical.
+  // It may be desirable for each to use a different inlined size optimized for
+  // that InlinedVector's usage.
+  //
+  // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
+  // the inlined vector size (47 exists for backward compatibility).
+  kInlinedVectorSize = 47,
+
+  // Prefer copying blocks of at most this size, otherwise reference count.
+  kMaxBytesToCopy = 511
+};
+
+// Emits a fatal error "Unexpected node type: xyz" and aborts the program.
+ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep);
+
+// Fast implementation of memmove for up to 15 bytes. This implementation is
+// safe for overlapping regions. If nullify_tail is true, the destination is
+// padded with '\0' up to 15 bytes.
+template <bool nullify_tail = false>
+inline void SmallMemmove(char* dst, const char* src, size_t n) {
+  if (n >= 8) {
+    assert(n <= 15);
+    uint64_t buf1;
+    uint64_t buf2;
+    memcpy(&buf1, src, 8);
+    memcpy(&buf2, src + n - 8, 8);
+    if (nullify_tail) {
+      memset(dst + 7, 0, 8);
+    }
+    memcpy(dst, &buf1, 8);
+    memcpy(dst + n - 8, &buf2, 8);
+  } else if (n >= 4) {
+    uint32_t buf1;
+    uint32_t buf2;
+    memcpy(&buf1, src, 4);
+    memcpy(&buf2, src + n - 4, 4);
+    if (nullify_tail) {
+      memset(dst + 4, 0, 4);
+      memset(dst + 7, 0, 8);
+    }
+    memcpy(dst, &buf1, 4);
+    memcpy(dst + n - 4, &buf2, 4);
+  } else {
+    if (n != 0) {
+      dst[0] = src[0];
+      dst[n / 2] = src[n / 2];
+      dst[n - 1] = src[n - 1];
+    }
+    if (nullify_tail) {
+      memset(dst + 7, 0, 8);
+      memset(dst + n, 0, 8);
+    }
+  }
+}
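+
+// Worked example for SmallMemmove above (illustrative, not upstream text):
+// for n == 11, the two 8-byte loads read bytes [0, 8) and [3, 11). Both loads
+// complete before either store, so the copy stays correct even when `dst`
+// and `src` overlap, which is what gives this helper memmove semantics.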
+
+// Compact class for tracking the reference count and state flags for CordRep
+// instances.  Data is stored in an atomic int32_t for compactness and speed.
+class RefcountAndFlags {
+ public:
+  constexpr RefcountAndFlags() : count_{kRefIncrement} {}
+  struct Immortal {};
+  explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
+
+  // Increments the reference count. Imposes no memory ordering.
+  inline void Increment() {
+    count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
+  }
 
   // Asserts that the current refcount is greater than 0. If the refcount is
-  // greater than 1, decrements the reference count by 1.
+  // greater than 1, decrements the reference count.
   //
   // Returns false if there are no references outstanding; true otherwise.
   // Inserts barriers to ensure that state written before this method returns
   // false will be visible to a thread that just observed this method returning
-  // false.
+  // false.  Always returns true when the immortal bit is set, since immortal
+  // reps are never destroyed.
   inline bool Decrement() {
     int32_t refcount = count_.load(std::memory_order_acquire);
-    assert(refcount > 0);
-    return refcount != 1 && count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
+    assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag);
+    return refcount != kRefIncrement &&
+           (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
+            kHighRefcountMask) != 0;
   }
 
   // Same as Decrement but expect that refcount is greater than 1.
   inline bool DecrementExpectHighRefcount() {
-    int32_t refcount = count_.fetch_sub(1, std::memory_order_acq_rel);
-    assert(refcount > 0);
-    return refcount != 1;
+    int32_t refcount =
+        count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel);
+    assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag);
+    return (refcount & kHighRefcountMask) != 0;
   }
 
   // Returns the current reference count using acquire semantics.
-  inline int32_t Get() const { return count_.load(std::memory_order_acquire); }
+  inline size_t Get() const {
+    return static_cast<size_t>(count_.load(std::memory_order_acquire) >>
+                               kNumFlags);
+  }
 
   // Returns whether the atomic integer is 1.
   // If the reference count is used in the conventional way, a
@@ -69,51 +185,174 @@
   // This call performs the test for a reference count of one, and
   // performs the memory barrier needed for the owning thread
   // to act on the object, knowing that it has exclusive access to the
-  // object.
-  inline bool IsOne() { return count_.load(std::memory_order_acquire) == 1; }
+  // object.  Always returns false when the immortal bit is set.
+  inline bool IsOne() {
+    return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
+           kRefIncrement;
+  }
+
+  bool IsImmortal() const {
+    return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
+  }
 
  private:
+  // We reserve the bottom bits for flags.
+  // kImmortalBit indicates that this entity should never be collected; it is
+  // used for the StringConstant constructor to avoid collecting immutable
+  // constant cords.
+  // kReservedFlag is reserved for future use.
+  enum Flags {
+    kNumFlags = 2,
+
+    kImmortalFlag = 0x1,
+    kReservedFlag = 0x2,
+    kRefIncrement = (1 << kNumFlags),
+
+    // Bitmask to use when checking refcount by equality.  This masks out
+    // all flags except kImmortalFlag, which is part of the refcount for
+    // purposes of equality.  (A refcount of 0 or 1 does not count as 0 or 1
+    // if the immortal bit is set.)
+    kRefcountMask = ~kReservedFlag,
+
+    // Bitmask to use when checking if refcount is equal to 1 and not
+    // immortal when decrementing the refcount. This masks out kRefIncrement and
+    // all flags except kImmortalFlag. If the masked RefcountAndFlags is 0, we
+    // assume the refcount is equal to 1, since we know it's not immortal and
+    // not greater than 1. If the masked RefcountAndFlags is not 0, we can
+    // assume the refcount is not equal to 1 since either a higher bit in the
+    // refcount is set, or kImmortal is set.
+    kHighRefcountMask = kRefcountMask & ~kRefIncrement,
+  };
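+
+  // Worked example of the encoding above (illustrative, not upstream text):
+  // a mortal rep with two outstanding references stores 2 << kNumFlags == 8,
+  // so Get() reports 2. An immortal rep starts at count_ == kImmortalFlag;
+  // that bit is part of kHighRefcountMask, so Decrement() always reports
+  // remaining references and the rep is never destroyed.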
+
   std::atomic<int32_t> count_;
 };
 
-// The overhead of a vtable is too much for Cord, so we roll our own subclasses
-// using only a single byte to differentiate classes from each other - the "tag"
-// byte.  Define the subclasses first so we can provide downcasting helper
-// functions in the base class.
+// Various representations that we allow
+enum CordRepKind {
+  UNUSED_0 = 0,
+  SUBSTRING = 1,
+  CRC = 2,
+  BTREE = 3,
+  RING = 4,
+  EXTERNAL = 5,
 
-struct CordRepConcat;
-struct CordRepSubstring;
-struct CordRepExternal;
+  // We have different tags for different sized flat arrays,
+  // starting with FLAT, and limited to MAX_FLAT_TAG. The values below map to
+  // an allocated range of 32 bytes to 256 KiB. The current granularity is:
+  // - 8 byte granularity for flat sizes in [32, 512]
+  // - 64 byte granularity for flat sizes in (512, 8 KiB]
+  // - 4 KiB granularity for flat sizes in (8 KiB, 256 KiB]
+  // If a new tag is needed in the future, then 'FLAT' and 'MAX_FLAT_TAG' should
+  // be adjusted as well as the Tag <---> Size mapping logic so that FLAT still
+  // represents the minimum flat allocation size. (32 bytes as of now).
+  FLAT = 6,
+  MAX_FLAT_TAG = 248
+};
+
+// There are various locations where we want to check if some rep is a 'plain'
+// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
+// can perform this check in a single branch as 'tag >= EXTERNAL'.
+// Likewise, some locations check for 'ring or external/flat', so RING is
+// placed directly before EXTERNAL for the same reason.
+// Note that we can leave this optimization to the compiler: it will do the
+// right thing when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
+static_assert(RING == BTREE + 1, "BTREE and RING not consecutive");
+static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL not consecutive");
+static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
 
 struct CordRep {
+  // Result from an `extract edge` operation. Contains the (possibly changed)
+  // tree node as well as the extracted edge, or {tree, nullptr} if no edge
+  // could be extracted.
+  // On success, the returned `tree` value is null if `extracted` was the only
+  // data edge inside the tree, a data edge if there were only two data edges in
+  // the tree, or the (possibly new / smaller) remaining tree with the extracted
+  // data edge removed.
+  struct ExtractResult {
+    CordRep* tree;
+    CordRep* extracted;
+  };
+
+  CordRep() = default;
+  constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
+      : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
+
   // The following three fields have to be less than 32 bytes since
-  // that is the smallest supported flat node size.
+  // that is the smallest supported flat node size. Some code optimizations rely
+  // on the specific layout of these fields. Notably: the non-trivial field
+  // `refcount` being preceded by `length`, and being tailed by POD data
+  // members only.
+  // # LINT.IfChange
   size_t length;
-  Refcount refcount;
+  RefcountAndFlags refcount;
   // If tag < FLAT, it represents CordRepKind and indicates the type of node.
   // Otherwise, the node type is CordRepFlat and the tag is the encoded size.
   uint8_t tag;
-  char data[1];  // Starting point for flat array: MUST BE LAST FIELD of CordRep
 
-  inline CordRepConcat* concat();
-  inline const CordRepConcat* concat() const;
+  // `storage` serves two main purposes:
+  // - the starting point for FlatCordRep.Data() [flexible-array-member]
+  // - 3 bytes of additional storage for use by derived classes.
+  // The latter is used by derived classes such as CordRepBtree, which stores
+  // its `height`, `begin` and `end` values in the 3 entries. Without this we
+  // would need to allocate room for these in the derived class, as not all
+  // compilers reuse padding space from the base class (clang and gcc do, MSVC
+  // does not, etc.).
+  uint8_t storage[3];
+  // # LINT.ThenChange(cord_rep_btree.h:copy_raw)
+
+  // Returns true if this instance's tag matches the requested type.
+  constexpr bool IsRing() const { return tag == RING; }
+  constexpr bool IsSubstring() const { return tag == SUBSTRING; }
+  constexpr bool IsCrc() const { return tag == CRC; }
+  constexpr bool IsExternal() const { return tag == EXTERNAL; }
+  constexpr bool IsFlat() const { return tag >= FLAT; }
+  constexpr bool IsBtree() const { return tag == BTREE; }
+
+  inline CordRepRing* ring();
+  inline const CordRepRing* ring() const;
   inline CordRepSubstring* substring();
   inline const CordRepSubstring* substring() const;
+  inline CordRepCrc* crc();
+  inline const CordRepCrc* crc() const;
   inline CordRepExternal* external();
   inline const CordRepExternal* external() const;
-};
+  inline CordRepFlat* flat();
+  inline const CordRepFlat* flat() const;
+  inline CordRepBtree* btree();
+  inline const CordRepBtree* btree() const;
 
-struct CordRepConcat : public CordRep {
-  CordRep* left;
-  CordRep* right;
+  // --------------------------------------------------------------------
+  // Memory management
 
-  uint8_t depth() const { return static_cast<uint8_t>(data[0]); }
-  void set_depth(uint8_t depth) { data[0] = static_cast<char>(depth); }
+  // Destroys the provided `rep`.
+  static void Destroy(CordRep* rep);
+
+  // Increments the reference count of `rep`.
+  // Requires `rep` to be a non-null pointer value.
+  static inline CordRep* Ref(CordRep* rep);
+
+  // Decrements the reference count of `rep`. Destroys rep if count reaches
+  // zero. Requires `rep` to be a non-null pointer value.
+  static inline void Unref(CordRep* rep);
 };
 
 struct CordRepSubstring : public CordRep {
   size_t start;  // Starting offset of substring in child
   CordRep* child;
+
+  // Creates a substring on `child`, adopting a reference on `child`.
+  // Requires `child` to be either a flat or external node, and `pos` and `n` to
+  // form a non-empty partial subrange of `child`, i.e.:
+  // `n > 0 && n < length && n + pos <= length`
+  static inline CordRepSubstring* Create(CordRep* child, size_t pos, size_t n);
+
+  // Creates a substring of `rep`. Does not adopt a reference on `rep`.
+  // Requires `IsDataEdge(rep) && n > 0 && pos + n <= rep->length`.
+  // If `n == rep->length` then this method returns `CordRep::Ref(rep)`.
+  // If `rep` is a substring of a flat or external node, then this method will
+  // return a new substring of that flat or external node with `pos` adjusted
+  // by the original `start` position.
+  static inline CordRep* Substring(CordRep* rep, size_t pos, size_t n);
 };
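+// For example (illustrative): given a flat or external rep `child` of length
+// 100, the following creates a 20 byte substring starting at offset 10,
+// adopting a newly created reference on `child`:
+//   CordRepSubstring* sub =
+//       CordRepSubstring::Create(CordRep::Ref(child), 10, 20);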
 
 // Type for function pointer that will invoke the releaser function and also
@@ -124,9 +363,19 @@
 // External CordReps are allocated together with a type erased releaser. The
 // releaser is stored in the memory directly following the CordRepExternal.
 struct CordRepExternal : public CordRep {
+  CordRepExternal() = default;
+  explicit constexpr CordRepExternal(absl::string_view str)
+      : CordRep(RefcountAndFlags::Immortal{}, str.size()),
+        base(str.data()),
+        releaser_invoker(nullptr) {}
+
   const char* base;
   // Pointer to function that knows how to call and destroy the releaser.
   ExternalReleaserInvoker releaser_invoker;
+
+  // Deletes (releases) the external rep.
+  // Requires rep != nullptr and rep->IsExternal()
+  static void Delete(CordRep* rep);
 };
 
 struct Rank1 {};
@@ -167,7 +416,502 @@
   }
 };
 
+inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos,
+                                                  size_t n) {
+  assert(child != nullptr);
+  assert(n > 0);
+  assert(n < child->length);
+  assert(pos < child->length);
+  assert(n <= child->length - pos);
+
+  // TODO(b/217376272): Harden internal logic.
+  // Move to strategic places inside the Cord logic and make this an assert.
+  if (ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) {
+    LogFatalNodeType(child);
+  }
+
+  CordRepSubstring* rep = new CordRepSubstring();
+  rep->length = n;
+  rep->tag = SUBSTRING;
+  rep->start = pos;
+  rep->child = child;
+  return rep;
+}
+
+inline CordRep* CordRepSubstring::Substring(CordRep* rep, size_t pos,
+                                            size_t n) {
+  assert(rep != nullptr);
+  assert(n != 0);
+  assert(pos < rep->length);
+  assert(n <= rep->length - pos);
+  if (n == rep->length) return CordRep::Ref(rep);
+  if (rep->IsSubstring()) {
+    pos += rep->substring()->start;
+    rep = rep->substring()->child;
+  }
+  CordRepSubstring* substr = new CordRepSubstring();
+  substr->length = n;
+  substr->tag = SUBSTRING;
+  substr->start = pos;
+  substr->child = CordRep::Ref(rep);
+  return substr;
+}
+
+inline void CordRepExternal::Delete(CordRep* rep) {
+  assert(rep != nullptr && rep->IsExternal());
+  auto* rep_external = static_cast<CordRepExternal*>(rep);
+  assert(rep_external->releaser_invoker != nullptr);
+  rep_external->releaser_invoker(rep_external);
+}
+
+template <typename Str>
+struct ConstInitExternalStorage {
+  ABSL_CONST_INIT static CordRepExternal value;
+};
+
+template <typename Str>
+ABSL_CONST_INIT CordRepExternal
+    ConstInitExternalStorage<Str>::value(Str::value);
+
+enum {
+  kMaxInline = 15,
+};
+
+constexpr char GetOrNull(absl::string_view data, size_t pos) {
+  return pos < data.size() ? data[pos] : '\0';
+}
+
+// We store cordz_info as a 64-bit pointer value in little endian format. This
+// guarantees that the least significant byte of cordz_info matches the first
+// byte of the inline data representation in `data`, which holds the inlined
+// size or the 'is_tree' bit.
+using cordz_info_t = int64_t;
+
+// Assert that the `cordz_info` pointer value perfectly overlaps the first half
+// of `data` and can hold a pointer value.
+static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
+static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");
+
+// LittleEndianByte() creates a little endian representation of 'value', i.e.:
+// a value whose first byte, when stored in memory, holds 'value', with all
+// other bytes being 0.
+static constexpr cordz_info_t LittleEndianByte(unsigned char value) {
+#if defined(ABSL_IS_BIG_ENDIAN)
+  return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
+#else
+  return value;
+#endif
+}
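+// For example, LittleEndianByte(1) is 0x01 on a little endian host and
+// 0x0100000000000000 on a big endian host; in both cases the first byte in
+// memory is 1.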
+
+class InlineData {
+ public:
+  // DefaultInitType forces the use of the default initialization constructor.
+  enum DefaultInitType { kDefaultInit };
+
+  // kNullCordzInfo holds the little endian representation of intptr_t(1).
+  // This is the 'null' / initial value of 'cordz_info'. The value 1 is used
+  // because the first byte of cordz_info overlaps the byte holding the tag,
+  // which must keep its 'is_tree' bit (the low bit) set.
+  static constexpr cordz_info_t kNullCordzInfo = LittleEndianByte(1);
+
+  // kTagOffset contains the offset of the control byte / tag. This constant is
+  // intended mostly for debugging purposes: do not remove this constant as it
+  // is actively inspected and used by gdb pretty printing code.
+  static constexpr size_t kTagOffset = 0;
+
+  // Implement `~InlineData()` conditionally: we only need this destructor to
+  // unpoison poisoned instances under *SAN, and it will only compile correctly
+  // if the current compiler supports `absl::is_constant_evaluated()`.
+#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER
+  ~InlineData() noexcept { unpoison(); }
+#endif
+
+  constexpr InlineData() noexcept { poison_this(); }
+
+  explicit InlineData(DefaultInitType) noexcept : rep_(kDefaultInit) {
+    poison_this();
+  }
+
+  explicit InlineData(CordRep* rep) noexcept : rep_(rep) {
+    ABSL_ASSERT(rep != nullptr);
+  }
+
+  // Explicit constexpr constructor to create a constexpr InlineData
+  // value. Creates an inlined SSO value if `rep` is null, otherwise
+  // creates a tree instance value.
+  constexpr InlineData(absl::string_view sv, CordRep* rep) noexcept
+      : rep_(rep ? Rep(rep) : Rep(sv)) {
+    poison();
+  }
+
+  constexpr InlineData(const InlineData& rhs) noexcept;
+  InlineData& operator=(const InlineData& rhs) noexcept;
+
+  friend bool operator==(const InlineData& lhs, const InlineData& rhs) {
+#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER
+    const Rep l = lhs.rep_.SanitizerSafeCopy();
+    const Rep r = rhs.rep_.SanitizerSafeCopy();
+    return memcmp(&l, &r, sizeof(l)) == 0;
+#else
+    return memcmp(&lhs, &rhs, sizeof(lhs)) == 0;
+#endif
+  }
+  friend bool operator!=(const InlineData& lhs, const InlineData& rhs) {
+    return !operator==(lhs, rhs);
+  }
+
+  // Poisons the unused inlined SSO data if the current instance
+  // is inlined, else un-poisons the entire instance.
+  constexpr void poison();
+
+  // Un-poisons this instance.
+  constexpr void unpoison();
+
+  // Poisons the current instance. This is used on default initialization.
+  constexpr void poison_this();
+
+  // Returns true if the current instance is empty.
+  // The 'empty value' is an inlined data value of zero length.
+  bool is_empty() const { return rep_.tag() == 0; }
+
+  // Returns true if the current instance holds a tree value.
+  bool is_tree() const { return (rep_.tag() & 1) != 0; }
+
+  // Returns true if the current instance holds a cordz_info value.
+  // Requires the current instance to hold a tree value.
+  bool is_profiled() const {
+    assert(is_tree());
+    return rep_.cordz_info() != kNullCordzInfo;
+  }
+
+  // Returns true if either of the provided instances hold a cordz_info value.
+  // This method is more efficient than the equivalent `data1.is_profiled() ||
+  // data2.is_profiled()`. Requires both arguments to hold a tree.
+  static bool is_either_profiled(const InlineData& data1,
+                                 const InlineData& data2) {
+    assert(data1.is_tree() && data2.is_tree());
+    return (data1.rep_.cordz_info() | data2.rep_.cordz_info()) !=
+           kNullCordzInfo;
+  }
+
+  // Returns the cordz_info sampling instance for this instance, or nullptr
+  // if the current instance is not sampled and does not have CordzInfo data.
+  // Requires the current instance to hold a tree value.
+  CordzInfo* cordz_info() const {
+    assert(is_tree());
+    intptr_t info = static_cast<intptr_t>(absl::little_endian::ToHost64(
+        static_cast<uint64_t>(rep_.cordz_info())));
+    assert(info & 1);
+    return reinterpret_cast<CordzInfo*>(info - 1);
+  }
+
+  // Sets the current cordz_info sampling instance for this instance, or nullptr
+  // if the current instance is not sampled and does not have CordzInfo data.
+  // Requires the current instance to hold a tree value.
+  void set_cordz_info(CordzInfo* cordz_info) {
+    assert(is_tree());
+    uintptr_t info = reinterpret_cast<uintptr_t>(cordz_info) | 1;
+    rep_.set_cordz_info(
+        static_cast<cordz_info_t>(absl::little_endian::FromHost64(info)));
+  }
+
+  // Resets the current cordz_info to null / empty.
+  void clear_cordz_info() {
+    assert(is_tree());
+    rep_.set_cordz_info(kNullCordzInfo);
+  }
+
+  // Returns a read only pointer to the character data inside this instance.
+  // Requires the current instance to hold inline data.
+  const char* as_chars() const {
+    assert(!is_tree());
+    return rep_.as_chars();
+  }
+
+  // Returns a mutable pointer to the character data inside this instance.
+  // Should be used for 'write only' operations setting an inlined value.
+  // Applications can set the value of inlined data either before or after
+  // setting the inlined size, i.e., both of the below are valid:
+  //
+  //   // Set inlined data and inline size
+  //   memcpy(data_.as_chars(), data, size);
+  //   data_.set_inline_size(size);
+  //
+  //   // Set inlined size and inline data
+  //   data_.set_inline_size(size);
+  //   memcpy(data_.as_chars(), data, size);
+  //
+  // It's an error to read from the returned pointer without a preceding write
+  // if the current instance does not hold inline data, i.e.: is_tree() == true.
+  char* as_chars() { return rep_.as_chars(); }
+
+  // Returns the tree value of this instance.
+  // Requires the current instance to hold a tree value.
+  CordRep* as_tree() const {
+    assert(is_tree());
+    return rep_.tree();
+  }
+
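+  // Sets the inlined data (and inlined size) to the first `n` bytes of `data`.
+  // Requires `n <= kMaxInline`.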
+  void set_inline_data(const char* data, size_t n) {
+    ABSL_ASSERT(n <= kMaxInline);
+    unpoison();
+    rep_.set_tag(static_cast<int8_t>(n << 1));
+    SmallMemmove<true>(rep_.as_chars(), data, n);
+    poison();
+  }
+
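+  // Copies all kMaxInline bytes of the inlined data into `dst`.
+  // Requires the current instance to hold inline data.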
+  void copy_max_inline_to(char* dst) const {
+    assert(!is_tree());
+    memcpy(dst, rep_.SanitizerSafeCopy().as_chars(), kMaxInline);
+  }
+
+  // Initializes this instance to hold the tree value `rep`,
+  // setting the cordz_info to null, i.e.: 'not profiled'.
+  void make_tree(CordRep* rep) {
+    unpoison();
+    rep_.make_tree(rep);
+  }
+
+  // Sets the tree value of this instance to `rep`.
+  // Requires the current instance to already hold a tree value.
+  // Does not affect the value of cordz_info.
+  void set_tree(CordRep* rep) {
+    assert(is_tree());
+    rep_.set_tree(rep);
+  }
+
+  // Returns the size of the inlined character data inside this instance.
+  // Requires the current instance to hold inline data.
+  size_t inline_size() const { return rep_.inline_size(); }
+
+  // Sets the size of the inlined character data inside this instance.
+  // Requires `size` to be <= kMaxInline.
+  // See the documentation on 'as_chars()' for more information and examples.
+  void set_inline_size(size_t size) {
+    unpoison();
+    rep_.set_inline_size(size);
+    poison();
+  }
+
+  // Compares 'this' inlined data with rhs. The comparison is a straightforward
+  // lexicographic comparison. `Compare()` returns values as follows:
+  //
+  //   -1  'this' InlineData instance is smaller
+  //    0  the InlineData instances are equal
+  //    1  'this' InlineData instance is larger
+  int Compare(const InlineData& rhs) const {
+    return Compare(rep_.SanitizerSafeCopy(), rhs.rep_.SanitizerSafeCopy());
+  }
+
+ private:
+  struct Rep {
+    // See `cordz_info_t` for details on the forced alignment and size of
+    // `cordz_info`.
+    struct AsTree {
+      explicit constexpr AsTree(absl::cord_internal::CordRep* tree)
+          : rep(tree) {}
+      cordz_info_t cordz_info = kNullCordzInfo;
+      absl::cord_internal::CordRep* rep;
+    };
+
+    explicit Rep(DefaultInitType) {}
+    constexpr Rep() : data{0} {}
+    constexpr Rep(const Rep&) = default;
+    constexpr Rep& operator=(const Rep&) = default;
+
+    explicit constexpr Rep(CordRep* rep) : as_tree(rep) {}
+
+    explicit constexpr Rep(absl::string_view chars)
+        : data{static_cast<char>((chars.size() << 1)),
+               GetOrNull(chars, 0),
+               GetOrNull(chars, 1),
+               GetOrNull(chars, 2),
+               GetOrNull(chars, 3),
+               GetOrNull(chars, 4),
+               GetOrNull(chars, 5),
+               GetOrNull(chars, 6),
+               GetOrNull(chars, 7),
+               GetOrNull(chars, 8),
+               GetOrNull(chars, 9),
+               GetOrNull(chars, 10),
+               GetOrNull(chars, 11),
+               GetOrNull(chars, 12),
+               GetOrNull(chars, 13),
+               GetOrNull(chars, 14)} {}
+
+    // Disable sanitizer as we must always be able to read `tag`.
+    ABSL_CORD_INTERNAL_NO_SANITIZE
+    int8_t tag() const { return reinterpret_cast<const int8_t*>(this)[0]; }
+    void set_tag(int8_t rhs) { reinterpret_cast<int8_t*>(this)[0] = rhs; }
+
+    char* as_chars() { return data + 1; }
+    const char* as_chars() const { return data + 1; }
+
+    bool is_tree() const { return (tag() & 1) != 0; }
+
+    size_t inline_size() const {
+      ABSL_ASSERT(!is_tree());
+      return static_cast<size_t>(tag()) >> 1;
+    }
+
+    void set_inline_size(size_t size) {
+      ABSL_ASSERT(size <= kMaxInline);
+      set_tag(static_cast<int8_t>(size << 1));
+    }
+
+    CordRep* tree() const { return as_tree.rep; }
+    void set_tree(CordRep* rhs) { as_tree.rep = rhs; }
+
+    cordz_info_t cordz_info() const { return as_tree.cordz_info; }
+    void set_cordz_info(cordz_info_t rhs) { as_tree.cordz_info = rhs; }
+
+    void make_tree(CordRep* tree) {
+      as_tree.rep = tree;
+      as_tree.cordz_info = kNullCordzInfo;
+    }
+
+#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER
+    constexpr Rep SanitizerSafeCopy() const {
+      if (!absl::is_constant_evaluated()) {
+        Rep res;
+        if (is_tree()) {
+          res = *this;
+        } else {
+          res.set_tag(tag());
+          memcpy(res.as_chars(), as_chars(), inline_size());
+        }
+        return res;
+      } else {
+        return *this;
+      }
+    }
+#else
+    constexpr const Rep& SanitizerSafeCopy() const { return *this; }
+#endif
+
+    // If the data has length <= kMaxInline, we store it in `data`, and
+    // store the size in the first char of `data`, shifted left by one (so the
+    // low 'is_tree' bit stays 0). Else we store a pointer to the tree in
+    // `as_tree.rep` and keep `as_tree.cordz_info` tagged (low bit set) so that
+    // `tag() & 1` is non zero.
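+    // For example, an inlined value of size 5 has tag() == 10 (5 << 1), while
+    // a tree value always has an odd tag because `cordz_info` is stored with
+    // its low bit set.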
+    union {
+      char data[kMaxInline + 1];
+      AsTree as_tree;
+    };
+  };
+
+  // Private implementation of `Compare()`
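+  // The implementation loads two overlapping 8-byte words from each operand
+  // (character offsets [0, 8) and [7, 15)), which together cover all
+  // kMaxInline (15) inline bytes, and compares them in big endian (i.e.
+  // lexicographic) order.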
+  static inline int Compare(const Rep& lhs, const Rep& rhs) {
+    uint64_t x, y;
+    memcpy(&x, lhs.as_chars(), sizeof(x));
+    memcpy(&y, rhs.as_chars(), sizeof(y));
+    if (x == y) {
+      memcpy(&x, lhs.as_chars() + 7, sizeof(x));
+      memcpy(&y, rhs.as_chars() + 7, sizeof(y));
+      if (x == y) {
+        if (lhs.inline_size() == rhs.inline_size()) return 0;
+        return lhs.inline_size() < rhs.inline_size() ? -1 : 1;
+      }
+    }
+    x = absl::big_endian::FromHost64(x);
+    y = absl::big_endian::FromHost64(y);
+    return x < y ? -1 : 1;
+  }
+
+  Rep rep_;
+};
+
+static_assert(sizeof(InlineData) == kMaxInline + 1, "");
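+// For example, a default constructed InlineData holds an empty inlined value
+// (`is_tree() == false`, `inline_size() == 0`), while `InlineData(rep)` holds
+// a tree value with a null cordz_info (`is_tree() == true`,
+// `is_profiled() == false`).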
+
+#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER
+
+constexpr InlineData::InlineData(const InlineData& rhs) noexcept
+    : rep_(rhs.rep_.SanitizerSafeCopy()) {
+  poison();
+}
+
+inline InlineData& InlineData::operator=(const InlineData& rhs) noexcept {
+  unpoison();
+  rep_ = rhs.rep_.SanitizerSafeCopy();
+  poison();
+  return *this;
+}
+
+constexpr void InlineData::poison_this() {
+  if (!absl::is_constant_evaluated()) {
+    container_internal::SanitizerPoisonObject(this);
+  }
+}
+
+constexpr void InlineData::unpoison() {
+  if (!absl::is_constant_evaluated()) {
+    container_internal::SanitizerUnpoisonObject(this);
+  }
+}
+
+constexpr void InlineData::poison() {
+  if (!absl::is_constant_evaluated()) {
+    if (is_tree()) {
+      container_internal::SanitizerUnpoisonObject(this);
+    } else if (const size_t size = inline_size()) {
+      if (size < kMaxInline) {
+        const char* end = rep_.as_chars() + size;
+        container_internal::SanitizerPoisonMemoryRegion(end, kMaxInline - size);
+      }
+    } else {
+      container_internal::SanitizerPoisonObject(this);
+    }
+  }
+}
+
+#else  // ABSL_INTERNAL_CORD_HAVE_SANITIZER
+
+constexpr InlineData::InlineData(const InlineData&) noexcept = default;
+inline InlineData& InlineData::operator=(const InlineData&) noexcept = default;
+
+constexpr void InlineData::poison_this() {}
+constexpr void InlineData::unpoison() {}
+constexpr void InlineData::poison() {}
+
+#endif  // ABSL_INTERNAL_CORD_HAVE_SANITIZER
+
+inline CordRepSubstring* CordRep::substring() {
+  assert(IsSubstring());
+  return static_cast<CordRepSubstring*>(this);
+}
+
+inline const CordRepSubstring* CordRep::substring() const {
+  assert(IsSubstring());
+  return static_cast<const CordRepSubstring*>(this);
+}
+
+inline CordRepExternal* CordRep::external() {
+  assert(IsExternal());
+  return static_cast<CordRepExternal*>(this);
+}
+
+inline const CordRepExternal* CordRep::external() const {
+  assert(IsExternal());
+  return static_cast<const CordRepExternal*>(this);
+}
+
+inline CordRep* CordRep::Ref(CordRep* rep) {
+  // ABSL_ASSUME is a workaround for
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105585
+  ABSL_ASSUME(rep != nullptr);
+  rep->refcount.Increment();
+  return rep;
+}
+
+inline void CordRep::Unref(CordRep* rep) {
+  assert(rep != nullptr);
+  // Expect the refcount to be greater than 1, i.e. the rep is shared. The cost
+  // of an extra branch checking for ref == 1 would then typically outweigh the
+  // occasional savings of skipping the atomic decrement.
+  if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
+    Destroy(rep);
+  }
+}
+
 }  // namespace cord_internal
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 #endif  // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree.cc b/abseil-cpp/absl/strings/internal/cord_rep_btree.cc
new file mode 100644
index 0000000..05bd0e2
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree.cc
@@ -0,0 +1,1241 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_btree.h"
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+#include <ostream>
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_consume.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordRepBtree::kMaxCapacity;
+#endif
+
+namespace {
+
+using NodeStack = CordRepBtree * [CordRepBtree::kMaxDepth];
+using EdgeType = CordRepBtree::EdgeType;
+using OpResult = CordRepBtree::OpResult;
+using CopyResult = CordRepBtree::CopyResult;
+
+constexpr auto kFront = CordRepBtree::kFront;
+constexpr auto kBack = CordRepBtree::kBack;
+
+ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
+
+// Implementation of the various 'Dump' functions.
+// Prints the entire tree structure of 'rep'. External callers should
+// not specify 'depth' and leave it at its default (0) value.
+// Rep may be a CordRepBtree tree, or a SUBSTRING / EXTERNAL / FLAT node.
+void DumpAll(const CordRep* rep,
+             bool include_contents,
+             std::ostream& stream,
+             size_t depth = 0) {
+  // Allow for full height trees + substring -> flat / external nodes.
+  assert(depth <= CordRepBtree::kMaxDepth + 2);
+  std::string sharing = const_cast<CordRep*>(rep)->refcount.IsOne()
+                            ? std::string("Private")
+                            : absl::StrCat("Shared(", rep->refcount.Get(), ")");
+  std::string sptr = absl::StrCat("0x", absl::Hex(rep));
+
+  // Dumps the data contents of `rep` if `include_contents` is true.
+  // Always emits a new line character.
+  auto maybe_dump_data = [&stream, include_contents](const CordRep* r) {
+    if (include_contents) {
+      // Allow for up to 60 wide display of content data, which with some
+      // indentation and prefix / labels keeps us within roughly 80-100 wide.
+      constexpr size_t kMaxDataLength = 60;
+      stream << ", data = \""
+             << EdgeData(r).substr(0, kMaxDataLength)
+             << (r->length > kMaxDataLength ? "\"..." : "\"");
+    }
+    stream << '\n';
+  };
+
+  // For each level, we print the 'shared/private' state and the rep pointer,
+  // indented by two spaces per recursive depth.
+  stream << std::string(depth * 2, ' ') << sharing << " (" << sptr << ") ";
+
+  if (rep->IsBtree()) {
+    const CordRepBtree* node = rep->btree();
+    std::string label =
+        node->height() ? absl::StrCat("Node(", node->height(), ")") : "Leaf";
+    stream << label << ", len = " << node->length
+           << ", begin = " << node->begin() << ", end = " << node->end()
+           << "\n";
+    for (CordRep* edge : node->Edges()) {
+      DumpAll(edge, include_contents, stream, depth + 1);
+    }
+  } else if (rep->tag == SUBSTRING) {
+    const CordRepSubstring* substring = rep->substring();
+    stream << "Substring, len = " << rep->length
+           << ", start = " << substring->start;
+    maybe_dump_data(rep);
+    DumpAll(substring->child, include_contents, stream, depth + 1);
+  } else if (rep->tag >= FLAT) {
+    stream << "Flat, len = " << rep->length
+           << ", cap = " << rep->flat()->Capacity();
+    maybe_dump_data(rep);
+  } else if (rep->tag == EXTERNAL) {
+    stream << "Extn, len = " << rep->length;
+    maybe_dump_data(rep);
+  }
+}
+
+// TODO(b/192061034): add 'bytes to copy' logic to avoid large slop when
+// substringing small data out of large reps, and for the general efficiency of
+// 'always copy small data'. Consider making this a cord rep internal library
+// function.
+CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) {
+  assert(n != 0);
+  assert(offset + n <= rep->length);
+  assert(offset != 0 || n != rep->length);
+
+  if (rep->tag == SUBSTRING) {
+    CordRepSubstring* substring = rep->substring();
+    offset += substring->start;
+    rep = CordRep::Ref(substring->child);
+    CordRep::Unref(substring);
+  }
+  assert(rep->IsExternal() || rep->IsFlat());
+  CordRepSubstring* substring = new CordRepSubstring();
+  substring->length = n;
+  substring->tag = SUBSTRING;
+  substring->start = offset;
+  substring->child = rep;
+  return substring;
+}
+
+// TODO(b/192061034): consider making this a cord rep library function.
+inline CordRep* MakeSubstring(CordRep* rep, size_t offset, size_t n) {
+  if (n == rep->length) return rep;
+  if (n == 0) return CordRep::Unref(rep), nullptr;
+  return CreateSubstring(rep, offset, n);
+}
+
+// TODO(b/192061034): consider making this a cord rep library function.
+inline CordRep* MakeSubstring(CordRep* rep, size_t offset) {
+  if (offset == 0) return rep;
+  return CreateSubstring(rep, offset, rep->length - offset);
+}
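+// For example (illustrative), MakeSubstring(rep, 0, rep->length) returns `rep`
+// unchanged, while MakeSubstring(rep, 10) drops the first 10 bytes of `rep` by
+// wrapping it in a substring node.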
+
+// Resizes `edge` to the provided `length`. Adopts a reference on `edge`.
+// This method directly returns `edge` if `length` equals `edge->length`.
+// If `is_mutable` is set to true, this function may return `edge` with
+// `edge->length` set to the new length depending on the type and size of
+// `edge`. Otherwise, this function returns a new CordRepSubstring value.
+// Requires `length > 0 && length <= edge->length`.
+CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) {
+  assert(length > 0);
+  assert(length <= edge->length);
+  assert(IsDataEdge(edge));
+  if (length >= edge->length) return edge;
+
+  if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) {
+    edge->length = length;
+    return edge;
+  }
+
+  return CreateSubstring(edge, 0, length);
+}
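+// For example, resizing a privately owned flat `edge` of length 100 to 10 with
+// `is_mutable == true` simply sets `edge->length = 10`, while with
+// `is_mutable == false` a new CordRepSubstring wrapping `edge` is returned.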
+
+template <EdgeType edge_type>
+inline absl::string_view Consume(absl::string_view s, size_t n) {
+  return edge_type == kBack ? s.substr(n) : s.substr(0, s.size() - n);
+}
+
+template <EdgeType edge_type>
+inline absl::string_view Consume(char* dst, absl::string_view s, size_t n) {
+  if (edge_type == kBack) {
+    memcpy(dst, s.data(), n);
+    return s.substr(n);
+  } else {
+    const size_t offset = s.size() - n;
+    memcpy(dst, s.data() + offset, n);
+    return s.substr(0, offset);
+  }
+}
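+// For example, Consume<kBack>("abcdef", 2) returns "cdef" (the first two
+// characters are consumed into the back of the tree), while
+// Consume<kFront>("abcdef", 2) returns "abcd".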
+
+// Known issue / optimization weirdness: the store associated with the atomic
+// decrement introduces traffic between cpus (even if the result of that
+// traffic does nothing), so checking `IsOne()` first and skipping the
+// decrement entirely makes this faster than a single call to
+// refcount.Decrement() checking the zero refcount condition.
+template <typename R, typename Fn>
+inline void FastUnref(R* r, Fn&& fn) {
+  if (r->refcount.IsOne()) {
+    fn(r);
+  } else if (!r->refcount.DecrementExpectHighRefcount()) {
+    fn(r);
+  }
+}
+
+
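+// Deletes a substring node: unrefs its (flat or external) child, deleting the
+// child if this was the last reference, and then deletes `substring` itself.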
+void DeleteSubstring(CordRepSubstring* substring) {
+  CordRep* rep = substring->child;
+  if (!rep->refcount.Decrement()) {
+    if (rep->tag >= FLAT) {
+      CordRepFlat::Delete(rep->flat());
+    } else {
+      assert(rep->tag == EXTERNAL);
+      CordRepExternal::Delete(rep->external());
+    }
+  }
+  delete substring;
+}
+
+// Deletes a leaf node data edge. Requires `IsDataEdge(rep)`.
+void DeleteLeafEdge(CordRep* rep) {
+  assert(IsDataEdge(rep));
+  if (rep->tag >= FLAT) {
+    CordRepFlat::Delete(rep->flat());
+  } else if (rep->tag == EXTERNAL) {
+    CordRepExternal::Delete(rep->external());
+  } else {
+    DeleteSubstring(rep->substring());
+  }
+}
+
+// StackOperations contains the logic to build a left-most or right-most stack
+// (leg) down to the leaf level of a btree, and 'unwind' / 'Finalize' methods to
+// propagate node changes up the stack.
+template <EdgeType edge_type>
+struct StackOperations {
+  // Returns true if the node at 'depth' is not shared, i.e. has a refcount
+  // of one and all of its parent nodes have a refcount of one.
+  inline bool owned(int depth) const { return depth < share_depth; }
+
+  // Returns the node at 'depth'.
+  inline CordRepBtree* node(int depth) const { return stack[depth]; }
+
+  // Builds a stack `depth` levels deep starting at `tree`, recording which
+  // nodes are private in the form of the 'share depth' at which nodes become
+  // shared.
+  inline CordRepBtree* BuildStack(CordRepBtree* tree, int depth) {
+    assert(depth <= tree->height());
+    int current_depth = 0;
+    while (current_depth < depth && tree->refcount.IsOne()) {
+      stack[current_depth++] = tree;
+      tree = tree->Edge(edge_type)->btree();
+    }
+    share_depth = current_depth + (tree->refcount.IsOne() ? 1 : 0);
+    while (current_depth < depth) {
+      stack[current_depth++] = tree;
+      tree = tree->Edge(edge_type)->btree();
+    }
+    return tree;
+  }
+
+  // Builds a stack with the invariant that all nodes are privately owned / not
+  // shared. This is used in iterative updates where a previous propagation
+  // guaranteed all nodes are owned / private.
+  inline void BuildOwnedStack(CordRepBtree* tree, int height) {
+    assert(height <= CordRepBtree::kMaxHeight);
+    int depth = 0;
+    while (depth < height) {
+      assert(tree->refcount.IsOne());
+      stack[depth++] = tree;
+      tree = tree->Edge(edge_type)->btree();
+    }
+    assert(tree->refcount.IsOne());
+    share_depth = depth + 1;
+  }
+
+  // Processes the final 'top level' result action for the tree.
+  // See the 'Action' enum for the various action implications.
+  static inline CordRepBtree* Finalize(CordRepBtree* tree, OpResult result) {
+    switch (result.action) {
+      case CordRepBtree::kPopped:
+        tree = edge_type == kBack ? CordRepBtree::New(tree, result.tree)
+                                  : CordRepBtree::New(result.tree, tree);
+        if (ABSL_PREDICT_FALSE(tree->height() > CordRepBtree::kMaxHeight)) {
+          tree = CordRepBtree::Rebuild(tree);
+          ABSL_RAW_CHECK(tree->height() <= CordRepBtree::kMaxHeight,
+                         "Max height exceeded");
+        }
+        return tree;
+      case CordRepBtree::kCopied:
+        CordRep::Unref(tree);
+        ABSL_FALLTHROUGH_INTENDED;
+      case CordRepBtree::kSelf:
+        return result.tree;
+    }
+    ABSL_UNREACHABLE();
+    return result.tree;
+  }
+
+  // Propagate the action result in 'result' up into all nodes of the stack
+  // starting at depth 'depth'. 'length' contains the extra length of data that
+  // was added at the lowest level, and is updated into all nodes of the stack.
+  // See the 'Action' enum for the various action implications.
+  // If 'propagate' is true, then any copied node values are updated into the
+  // stack, which is used for iterative processing on the same stack.
+  template <bool propagate = false>
+  inline CordRepBtree* Unwind(CordRepBtree* tree, int depth, size_t length,
+                              OpResult result) {
+    // TODO(mvels): revisit the below code to check if 3 loops with 3
+    // (incremental) conditions are faster than 1 loop with a switch.
+    // Benchmarking and perf recordings indicate the loop with switch is
+    // fastest, likely because of indirect jumps on the tight case values and
+    // dense branches. But it's worth considering 3 loops, as the `action`
+    // transitions are one-directional. E.g.:
+    //   while (action == kPopped) {
+    //     ...
+    //   }
+    //   while (action == kCopied) {
+    //     ...
+    //   }
+    //   ...
+    // We also found that an "if () do {}" loop here seems faster, possibly
+    // because it allows the branch predictor more granular heuristics on
+    // 'single leaf' (`depth` == 0) and 'single depth' (`depth` == 1) cases
+    // which appear to be the most common use cases.
+    if (depth != 0) {
+      do {
+        CordRepBtree* node = stack[--depth];
+        const bool owned = depth < share_depth;
+        switch (result.action) {
+          case CordRepBtree::kPopped:
+            assert(!propagate);
+            result = node->AddEdge<edge_type>(owned, result.tree, length);
+            break;
+          case CordRepBtree::kCopied:
+            result = node->SetEdge<edge_type>(owned, result.tree, length);
+            if (propagate) stack[depth] = result.tree;
+            break;
+          case CordRepBtree::kSelf:
+            node->length += length;
+            while (depth > 0) {
+              node = stack[--depth];
+              node->length += length;
+            }
+            return node;
+        }
+      } while (depth > 0);
+    }
+    return Finalize(tree, result);
+  }
+
+  // Invokes `Unwind` with `propagate=true` to update the stack node values.
+  inline CordRepBtree* Propagate(CordRepBtree* tree, int depth, size_t length,
+                                 OpResult result) {
+    return Unwind</*propagate=*/true>(tree, depth, length, result);
+  }
+
+  // `share_depth` contains the depth at which the nodes in the stack become
+  // shared. I.e., if the top most level is shared (i.e.: `!refcount.IsOne()`),
+  // then `share_depth` is 0. If the 2nd node is shared (and implicitly all
+  // nodes below that) then `share_depth` is 1, etc. A `share_depth` greater
+  // than the depth of the stack indicates that none of the nodes in the stack
+  // are shared.
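+  // For example, with a stack of depth 2 in which the root node is privately
+  // owned but its child node is shared, `share_depth` is 1, so `owned(0)` is
+  // true and `owned(1)` is false.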
+  int share_depth;
+
+  NodeStack stack;
+};
+
+}  // namespace
+
+void SetCordBtreeExhaustiveValidation(bool do_exhaustive_validation) {
+  cord_btree_exhaustive_validation.store(do_exhaustive_validation,
+                                         std::memory_order_relaxed);
+}
+
+bool IsCordBtreeExhaustiveValidationEnabled() {
+  return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
+}
+
+void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
+                        bool include_contents, std::ostream& stream) {
+  stream << "===================================\n";
+  if (!label.empty()) {
+    stream << label << '\n';
+    stream << "-----------------------------------\n";
+  }
+  if (rep) {
+    DumpAll(rep, include_contents, stream);
+  } else {
+    stream << "NULL\n";
+  }
+}
+
+void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
+                        std::ostream& stream) {
+  Dump(rep, label, false, stream);
+}
+
+void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) {
+  Dump(rep, absl::string_view(), false, stream);
+}
+
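+// Destroys a non-leaf tree: `size == 1` handles trees of height 1, whose
+// grand-child edges are data edges; `size == 2` (used for any height >= 2)
+// handles deeper trees, whose grand-child edges are btree nodes destroyed
+// recursively through CordRepBtree::Destroy.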
+template <size_t size>
+static void DestroyTree(CordRepBtree* tree) {
+  for (CordRep* node : tree->Edges()) {
+    if (node->refcount.Decrement()) continue;
+    for (CordRep* edge : node->btree()->Edges()) {
+      if (edge->refcount.Decrement()) continue;
+      if (size == 1) {
+        DeleteLeafEdge(edge);
+      } else {
+        CordRepBtree::Destroy(edge->btree());
+      }
+    }
+    CordRepBtree::Delete(node->btree());
+  }
+  CordRepBtree::Delete(tree);
+}
+
+void CordRepBtree::Destroy(CordRepBtree* tree) {
+  switch (tree->height()) {
+    case 0:
+      for (CordRep* edge : tree->Edges()) {
+        if (!edge->refcount.Decrement()) {
+          DeleteLeafEdge(edge);
+        }
+      }
+      return CordRepBtree::Delete(tree);
+    case 1:
+      return DestroyTree<1>(tree);
+    default:
+      return DestroyTree<2>(tree);
+  }
+}
+
+bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
+#define NODE_CHECK_VALID(x)                                           \
+  if (!(x)) {                                                         \
+    ABSL_RAW_LOG(ERROR, "CordRepBtree::CheckValid() FAILED: %s", #x); \
+    return false;                                                     \
+  }
+#define NODE_CHECK_EQ(x, y)                                                    \
+  if ((x) != (y)) {                                                            \
+    ABSL_RAW_LOG(ERROR,                                                        \
+                 "CordRepBtree::CheckValid() FAILED: %s != %s (%s vs %s)", #x, \
+                 #y, absl::StrCat(x).c_str(), absl::StrCat(y).c_str());        \
+    return false;                                                              \
+  }
+
+  NODE_CHECK_VALID(tree != nullptr);
+  NODE_CHECK_VALID(tree->IsBtree());
+  NODE_CHECK_VALID(tree->height() <= kMaxHeight);
+  NODE_CHECK_VALID(tree->begin() < tree->capacity());
+  NODE_CHECK_VALID(tree->end() <= tree->capacity());
+  NODE_CHECK_VALID(tree->begin() <= tree->end());
+  size_t child_length = 0;
+  for (CordRep* edge : tree->Edges()) {
+    NODE_CHECK_VALID(edge != nullptr);
+    if (tree->height() > 0) {
+      NODE_CHECK_VALID(edge->IsBtree());
+      NODE_CHECK_VALID(edge->btree()->height() == tree->height() - 1);
+    } else {
+      NODE_CHECK_VALID(IsDataEdge(edge));
+    }
+    child_length += edge->length;
+  }
+  NODE_CHECK_EQ(child_length, tree->length);
+  if ((!shallow || IsCordBtreeExhaustiveValidationEnabled()) &&
+      tree->height() > 0) {
+    for (CordRep* edge : tree->Edges()) {
+      if (!IsValid(edge->btree(), shallow)) return false;
+    }
+  }
+  return true;
+
+#undef NODE_CHECK_VALID
+#undef NODE_CHECK_EQ
+}
+
+#ifndef NDEBUG
+
+CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree, bool shallow) {
+  if (!IsValid(tree, shallow)) {
+    Dump(tree, "CordRepBtree validation failed:", false, std::cout);
+    ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
+  }
+  return tree;
+}
+
+const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree,
+                                              bool shallow) {
+  if (!IsValid(tree, shallow)) {
+    Dump(tree, "CordRepBtree validation failed:", false, std::cout);
+    ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
+  }
+  return tree;
+}
+
+#endif  // NDEBUG
+
+template <EdgeType edge_type>
+inline OpResult CordRepBtree::AddEdge(bool owned, CordRep* edge, size_t delta) {
+  if (size() >= kMaxCapacity) return {New(edge), kPopped};
+  OpResult result = ToOpResult(owned);
+  result.tree->Add<edge_type>(edge);
+  result.tree->length += delta;
+  return result;
+}
+
+template <EdgeType edge_type>
+OpResult CordRepBtree::SetEdge(bool owned, CordRep* edge, size_t delta) {
+  OpResult result;
+  const size_t idx = index(edge_type);
+  if (owned) {
+    result = {this, kSelf};
+    CordRep::Unref(edges_[idx]);
+  } else {
+    // Create a copy containing all unchanged edges. Unchanged edges are the
+    // half-open interval [begin, back) or [begin + 1, end) depending on
+    // `edge_type`. We conveniently cover both cases using a constexpr `shift`
+    // of 0 or 1, since `end == back + 1`.
+    result = {CopyRaw(length), kCopied};
+    constexpr int shift = edge_type == kFront ? 1 : 0;
+    for (CordRep* r : Edges(begin() + shift, back() + shift)) {
+      CordRep::Ref(r);
+    }
+  }
+  result.tree->edges_[idx] = edge;
+  result.tree->length += delta;
+  return result;
+}
+
+template <EdgeType edge_type>
+CordRepBtree* CordRepBtree::AddCordRep(CordRepBtree* tree, CordRep* rep) {
+  const int depth = tree->height();
+  const size_t length = rep->length;
+  StackOperations<edge_type> ops;
+  CordRepBtree* leaf = ops.BuildStack(tree, depth);
+  const OpResult result =
+      leaf->AddEdge<edge_type>(ops.owned(depth), rep, length);
+  return ops.Unwind(tree, depth, length, result);
+}
+
+template <>
+CordRepBtree* CordRepBtree::NewLeaf<kBack>(absl::string_view data,
+                                           size_t extra) {
+  CordRepBtree* leaf = CordRepBtree::New(0);
+  size_t length = 0;
+  size_t end = 0;
+  const size_t cap = leaf->capacity();
+  while (!data.empty() && end != cap) {
+    auto* flat = CordRepFlat::New(data.length() + extra);
+    flat->length = (std::min)(data.length(), flat->Capacity());
+    length += flat->length;
+    leaf->edges_[end++] = flat;
+    data = Consume<kBack>(flat->Data(), data, flat->length);
+  }
+  leaf->length = length;
+  leaf->set_end(end);
+  return leaf;
+}
+
+template <>
+CordRepBtree* CordRepBtree::NewLeaf<kFront>(absl::string_view data,
+                                            size_t extra) {
+  CordRepBtree* leaf = CordRepBtree::New(0);
+  size_t length = 0;
+  size_t begin = leaf->capacity();
+  leaf->set_end(leaf->capacity());
+  while (!data.empty() && begin != 0) {
+    auto* flat = CordRepFlat::New(data.length() + extra);
+    flat->length = (std::min)(data.length(), flat->Capacity());
+    length += flat->length;
+    leaf->edges_[--begin] = flat;
+    data = Consume<kFront>(flat->Data(), data, flat->length);
+  }
+  leaf->length = length;
+  leaf->set_begin(begin);
+  return leaf;
+}
+
+template <>
+absl::string_view CordRepBtree::AddData<kBack>(absl::string_view data,
+                                               size_t extra) {
+  assert(!data.empty());
+  assert(size() < capacity());
+  AlignBegin();
+  const size_t cap = capacity();
+  do {
+    CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
+    const size_t n = (std::min)(data.length(), flat->Capacity());
+    flat->length = n;
+    edges_[fetch_add_end(1)] = flat;
+    data = Consume<kBack>(flat->Data(), data, n);
+  } while (!data.empty() && end() != cap);
+  return data;
+}
+
+template <>
+absl::string_view CordRepBtree::AddData<kFront>(absl::string_view data,
+                                                size_t extra) {
+  assert(!data.empty());
+  assert(size() < capacity());
+  AlignEnd();
+  do {
+    CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
+    const size_t n = (std::min)(data.length(), flat->Capacity());
+    flat->length = n;
+    edges_[sub_fetch_begin(1)] = flat;
+    data = Consume<kFront>(flat->Data(), data, n);
+  } while (!data.empty() && begin() != 0);
+  return data;
+}
+
+template <EdgeType edge_type>
+CordRepBtree* CordRepBtree::AddData(CordRepBtree* tree, absl::string_view data,
+                                    size_t extra) {
+  if (ABSL_PREDICT_FALSE(data.empty())) return tree;
+
+  const size_t original_data_size = data.size();
+  int depth = tree->height();
+  StackOperations<edge_type> ops;
+  CordRepBtree* leaf = ops.BuildStack(tree, depth);
+
+  // If there is capacity in the last edge, append as much data
+  // as possible into this last edge.
+  if (leaf->size() < leaf->capacity()) {
+    OpResult result = leaf->ToOpResult(ops.owned(depth));
+    data = result.tree->AddData<edge_type>(data, extra);
+    if (data.empty()) {
+      result.tree->length += original_data_size;
+      return ops.Unwind(tree, depth, original_data_size, result);
+    }
+
+    // We added some data into this leaf, but not all. Propagate the added
+    // length to the top most node, and rebuild the stack with any newly copied
+    // or updated nodes. From this point on, the path (leg) from the top most
+    // node to the right-most node towards the leaf node is privately owned.
+    size_t delta = original_data_size - data.size();
+    assert(delta > 0);
+    result.tree->length += delta;
+    tree = ops.Propagate(tree, depth, delta, result);
+    ops.share_depth = depth + 1;
+  }
+
+  // We were unable to append all data into the existing right-most leaf node.
+  // This means all remaining data must be put into (a) new leaf node(s) which
+  // we append to the tree. To make this efficient, we iteratively build full
+  // leaf nodes from `data` until the created leaf contains all remaining data.
+  // We utilize the `Unwind` method to merge the created leaf into the first
+  // level towards root that has capacity. On each iteration with remaining
+  // data, we rebuild the stack in the knowledge that right-most nodes are
+  // privately owned after the first `Unwind` completes.
+  for (;;) {
+    OpResult result = {CordRepBtree::NewLeaf<edge_type>(data, extra), kPopped};
+    if (result.tree->length == data.size()) {
+      return ops.Unwind(tree, depth, result.tree->length, result);
+    }
+    data = Consume<edge_type>(data, result.tree->length);
+    tree = ops.Unwind(tree, depth, result.tree->length, result);
+    depth = tree->height();
+    ops.BuildOwnedStack(tree, depth);
+  }
+}
+
+template <EdgeType edge_type>
+CordRepBtree* CordRepBtree::Merge(CordRepBtree* dst, CordRepBtree* src) {
+  assert(dst->height() >= src->height());
+
+  // Capture source length as we may consume / destroy `src`.
+  const size_t length = src->length;
+
+  // We attempt to merge `src` at its corresponding height in `dst`.
+  const int depth = dst->height() - src->height();
+  StackOperations<edge_type> ops;
+  CordRepBtree* merge_node = ops.BuildStack(dst, depth);
+
+  // If there is enough space in `merge_node` for all edges from `src`, add all
+  // edges to this node, making a fresh copy as needed if not privately owned.
+  // If `merge_node` does not have capacity for `src`, we rely on `Unwind` and
+  // `Finalize` to merge `src` into the first level towards `root` where there
+  // is capacity for another edge, or create a new top level node.
+  OpResult result;
+  if (merge_node->size() + src->size() <= kMaxCapacity) {
+    result = merge_node->ToOpResult(ops.owned(depth));
+    result.tree->Add<edge_type>(src->Edges());
+    result.tree->length += src->length;
+    if (src->refcount.IsOne()) {
+      Delete(src);
+    } else {
+      for (CordRep* edge : src->Edges()) CordRep::Ref(edge);
+      CordRepBtree::Unref(src);
+    }
+  } else {
+    result = {src, kPopped};
+  }
+
+  // Unless we merged at the top level (i.e.: src and dst are equal height),
+  // unwind the result towards the top level, and finalize the result.
+  if (depth) {
+    return ops.Unwind(dst, depth, length, result);
+  }
+  return ops.Finalize(dst, result);
+}
+
+CopyResult CordRepBtree::CopySuffix(size_t offset) {
+  assert(offset < this->length);
+
+  // As long as `offset` starts inside the last edge, we can 'drop' the current
+  // depth. For the most extreme example: if offset references the last data
+  // edge in the tree, there is only a single edge / path from the top of the
+  // tree to that last edge, so we can drop all the nodes except that edge.
+  // The fast path check for this is `back->length >= length - offset`.
+  int height = this->height();
+  CordRepBtree* node = this;
+  size_t len = node->length - offset;
+  CordRep* back = node->Edge(kBack);
+  while (back->length >= len) {
+    offset = back->length - len;
+    if (--height < 0) {
+      return {MakeSubstring(CordRep::Ref(back), offset), height};
+    }
+    node = back->btree();
+    back = node->Edge(kBack);
+  }
+  if (offset == 0) return {CordRep::Ref(node), height};
+
+  // Offset does not point into the last edge, so we span at least two edges.
+  // Find the index of offset with `IndexBeyond` which provides us the edge
+  // 'beyond' the offset if offset is not a clean starting point of an edge.
+  Position pos = node->IndexBeyond(offset);
+  CordRepBtree* sub = node->CopyToEndFrom(pos.index, len);
+  const CopyResult result = {sub, height};
+
+  // `pos.n` contains a non zero value if the offset is not an exact starting
+  // point of an edge. In this case, `pos.n` contains the 'trailing' number of
+  // bytes of the edge preceding the one at `pos.index`. We need to iteratively
+  // adjust that preceding edge for the 'broken' offset until we reach a clean
+  // start of an edge.
+  while (pos.n != 0) {
+    assert(pos.index >= 1);
+    const size_t begin = pos.index - 1;
+    sub->set_begin(begin);
+    CordRep* const edge = node->Edge(begin);
+
+    len = pos.n;
+    offset = edge->length - len;
+
+    if (--height < 0) {
+      sub->edges_[begin] = MakeSubstring(CordRep::Ref(edge), offset, len);
+      return result;
+    }
+
+    node = edge->btree();
+    pos = node->IndexBeyond(offset);
+
+    CordRepBtree* nsub = node->CopyToEndFrom(pos.index, len);
+    sub->edges_[begin] = nsub;
+    sub = nsub;
+  }
+  sub->set_begin(pos.index);
+  return result;
+}
+
+CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) {
+  assert(n > 0);
+  assert(n <= this->length);
+
+  // As long as `n` does not exceed the length of the first edge, we can 'drop'
+  // the current depth. For the most extreme example: if we'd copy a 1 byte
+  // prefix from a tree, there is only a single edge / path from the top of the
+  // tree to the single data edge containing this byte, so we can drop all the
+  // nodes except the data node.
+  int height = this->height();
+  CordRepBtree* node = this;
+  CordRep* front = node->Edge(kFront);
+  if (allow_folding) {
+    while (front->length >= n) {
+      if (--height < 0) return {MakeSubstring(CordRep::Ref(front), 0, n), -1};
+      node = front->btree();
+      front = node->Edge(kFront);
+    }
+  }
+  if (node->length == n) return {CordRep::Ref(node), height};
+
+  // `n` spans at least two nodes, find the end point of the span.
+  Position pos = node->IndexOf(n);
+
+  // Create a partial copy of the node up to `pos.index`, with a defined length
+  // of `n`. Any 'partial last edge' is added further below as needed.
+  CordRepBtree* sub = node->CopyBeginTo(pos.index, n);
+  const CopyResult result = {sub, height};
+
+  // `pos.n` contains the 'offset inside the edge for IndexOf(n)'. As long as
+  // this is not zero, we don't have a 'clean cut', so we need to make a
+  // (partial) copy of that last edge, and repeat this until pos.n is zero.
+  while (pos.n != 0) {
+    size_t end = pos.index;
+    n = pos.n;
+
+    CordRep* edge = node->Edge(pos.index);
+    if (--height < 0) {
+      sub->edges_[end++] = MakeSubstring(CordRep::Ref(edge), 0, n);
+      sub->set_end(end);
+      AssertValid(result.edge->btree());
+      return result;
+    }
+
+    node = edge->btree();
+    pos = node->IndexOf(n);
+    CordRepBtree* nsub = node->CopyBeginTo(pos.index, n);
+    sub->edges_[end++] = nsub;
+    sub->set_end(end);
+    sub = nsub;
+  }
+  sub->set_end(pos.index);
+  AssertValid(result.edge->btree());
+  return result;
+}
+
+CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
+  CordRep* front = tree->Edge(tree->begin());
+  if (tree->refcount.IsOne()) {
+    Unref(tree->Edges(tree->begin() + 1, tree->end()));
+    CordRepBtree::Delete(tree);
+  } else {
+    CordRep::Ref(front);
+    CordRep::Unref(tree);
+  }
+  return front;
+}
+
+CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end,
+                                           size_t new_length) {
+  assert(end <= tree->end());
+  if (tree->refcount.IsOne()) {
+    Unref(tree->Edges(end, tree->end()));
+    tree->set_end(end);
+    tree->length = new_length;
+  } else {
+    CordRepBtree* old = tree;
+    tree = tree->CopyBeginTo(end, new_length);
+    CordRep::Unref(old);
+  }
+  return tree;
+}
+
+CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
+  // Check input and deal with trivial cases 'Remove all/none'
+  assert(tree != nullptr);
+  assert(n <= tree->length);
+  const size_t len = tree->length;
+  if (ABSL_PREDICT_FALSE(n == 0)) {
+    return tree;
+  }
+  if (ABSL_PREDICT_FALSE(n >= len)) {
+    CordRepBtree::Unref(tree);
+    return nullptr;
+  }
+
+  size_t length = len - n;
+  int height = tree->height();
+  bool is_mutable = tree->refcount.IsOne();
+
+  // Extract all top nodes which are reduced to size = 1
+  Position pos = tree->IndexOfLength(length);
+  while (pos.index == tree->begin()) {
+    CordRep* edge = ExtractFront(tree);
+    is_mutable &= edge->refcount.IsOne();
+    if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
+    tree = edge->btree();
+    pos = tree->IndexOfLength(length);
+  }
+
+  // Repeat the following sequence traversing down the tree:
+  // - Crop the top node to the 'last remaining edge' adjusting length.
+  // - Set the length for down edges to the partial length in that last edge.
+  // - Repeat this until the last edge is 'included in full'
+  // - If we hit the data edge level, resize and return the last data edge
+  CordRepBtree* top = tree = ConsumeBeginTo(tree, pos.index + 1, length);
+  CordRep* edge = tree->Edge(pos.index);
+  length = pos.n;
+  while (length != edge->length) {
+    // ConsumeBeginTo guarantees `tree` is a clean, privately owned copy.
+    assert(tree->refcount.IsOne());
+    const bool edge_is_mutable = edge->refcount.IsOne();
+
+    if (height-- == 0) {
+      tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
+      return AssertValid(top);
+    }
+
+    if (!edge_is_mutable) {
+      // We can't 'in place' remove any suffixes down this edge.
+      // Replace this edge with a prefix copy instead.
+      tree->edges_[pos.index] = edge->btree()->CopyPrefix(length, false).edge;
+      CordRep::Unref(edge);
+      return AssertValid(top);
+    }
+
+    // Move down one level, rinse repeat.
+    tree = edge->btree();
+    pos = tree->IndexOfLength(length);
+    tree = ConsumeBeginTo(edge->btree(), pos.index + 1, length);
+    edge = tree->Edge(pos.index);
+    length = pos.n;
+  }
+
+  return AssertValid(top);
+}
+
+CordRep* CordRepBtree::SubTree(size_t offset, size_t n) {
+  assert(n <= this->length);
+  assert(offset <= this->length - n);
+  if (ABSL_PREDICT_FALSE(n == 0)) return nullptr;
+
+  CordRepBtree* node = this;
+  int height = node->height();
+  Position front = node->IndexOf(offset);
+  CordRep* left = node->edges_[front.index];
+  while (front.n + n <= left->length) {
+    if (--height < 0) return MakeSubstring(CordRep::Ref(left), front.n, n);
+    node = left->btree();
+    front = node->IndexOf(front.n);
+    left = node->edges_[front.index];
+  }
+
+  const Position back = node->IndexBefore(front, n);
+  CordRep* const right = node->edges_[back.index];
+  assert(back.index > front.index);
+
+  // Get partial suffix and prefix entries.
+  CopyResult prefix;
+  CopyResult suffix;
+  if (height > 0) {
+    // Copy prefix and suffix of the boundary nodes.
+    prefix = left->btree()->CopySuffix(front.n);
+    suffix = right->btree()->CopyPrefix(back.n);
+
+    // If there is an edge between the prefix and suffix edges, then the tree
+    // must remain at its previous (full) height. If we have no edges between
+    // prefix and suffix edges, then the tree must be as high as either the
+    // suffix or prefix edges (which are collapsed to their minimum heights).
+    if (front.index + 1 == back.index) {
+      height = (std::max)(prefix.height, suffix.height) + 1;
+    }
+
+    // Raise the prefix and suffix to the new tree height.
+    for (int h = prefix.height + 1; h < height; ++h) {
+      prefix.edge = CordRepBtree::New(prefix.edge);
+    }
+    for (int h = suffix.height + 1; h < height; ++h) {
+      suffix.edge = CordRepBtree::New(suffix.edge);
+    }
+  } else {
+    // Leaf node, simply take substrings for prefix and suffix.
+    prefix = CopyResult{MakeSubstring(CordRep::Ref(left), front.n), -1};
+    suffix = CopyResult{MakeSubstring(CordRep::Ref(right), 0, back.n), -1};
+  }
+
+  // Compose resulting tree.
+  CordRepBtree* sub = CordRepBtree::New(height);
+  size_t end = 0;
+  sub->edges_[end++] = prefix.edge;
+  for (CordRep* r : node->Edges(front.index + 1, back.index)) {
+    sub->edges_[end++] = CordRep::Ref(r);
+  }
+  sub->edges_[end++] = suffix.edge;
+  sub->set_end(end);
+  sub->length = n;
+  return AssertValid(sub);
+}
+
+CordRepBtree* CordRepBtree::MergeTrees(CordRepBtree* left,
+                                       CordRepBtree* right) {
+  return left->height() >= right->height() ? Merge<kBack>(left, right)
+                                           : Merge<kFront>(right, left);
+}
+
+bool CordRepBtree::IsFlat(absl::string_view* fragment) const {
+  if (height() == 0 && size() == 1) {
+    if (fragment) *fragment = Data(begin());
+    return true;
+  }
+  return false;
+}
+
+bool CordRepBtree::IsFlat(size_t offset, const size_t n,
+                          absl::string_view* fragment) const {
+  assert(n <= this->length);
+  assert(offset <= this->length - n);
+  if (ABSL_PREDICT_FALSE(n == 0)) return false;
+  int height = this->height();
+  const CordRepBtree* node = this;
+  for (;;) {
+    const Position front = node->IndexOf(offset);
+    const CordRep* edge = node->Edge(front.index);
+    if (edge->length < front.n + n) return false;
+    if (--height < 0) {
+      if (fragment) *fragment = EdgeData(edge).substr(front.n, n);
+      return true;
+    }
+    offset = front.n;
+    node = node->Edge(front.index)->btree();
+  }
+}
+
+char CordRepBtree::GetCharacter(size_t offset) const {
+  assert(offset < length);
+  const CordRepBtree* node = this;
+  int height = node->height();
+  for (;;) {
+    Position front = node->IndexOf(offset);
+    if (--height < 0) return node->Data(front.index)[front.n];
+    offset = front.n;
+    node = node->Edge(front.index)->btree();
+  }
+}
+
+Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
+  // The inlined version in `GetAppendBuffer()` deals with all heights <= 3.
+  assert(height() >= 4);
+  assert(refcount.IsOne());
+
+  // Build a stack of nodes we may potentially need to update if we find a
+  // non-shared FLAT with capacity at the leaf level.
+  const int depth = height();
+  CordRepBtree* node = this;
+  CordRepBtree* stack[kMaxDepth];
+  for (int i = 0; i < depth; ++i) {
+    node = node->Edge(kBack)->btree();
+    if (!node->refcount.IsOne()) return {};
+    stack[i] = node;
+  }
+
+  // Must be a privately owned, mutable flat.
+  CordRep* const edge = node->Edge(kBack);
+  if (!edge->refcount.IsOne() || edge->tag < FLAT) return {};
+
+  // Must have capacity.
+  const size_t avail = edge->flat()->Capacity() - edge->length;
+  if (avail == 0) return {};
+
+  // Build span on remaining capacity.
+  size_t delta = (std::min)(size, avail);
+  Span<char> span = {edge->flat()->Data() + edge->length, delta};
+  edge->length += delta;
+  this->length += delta;
+  for (int i = 0; i < depth; ++i) {
+    stack[i]->length += delta;
+  }
+  return span;
+}
+
+CordRepBtree* CordRepBtree::CreateSlow(CordRep* rep) {
+  if (rep->IsBtree()) return rep->btree();
+
+  CordRepBtree* node = nullptr;
+  auto consume = [&node](CordRep* r, size_t offset, size_t length) {
+    r = MakeSubstring(r, offset, length);
+    if (node == nullptr) {
+      node = New(r);
+    } else {
+      node = CordRepBtree::AddCordRep<kBack>(node, r);
+    }
+  };
+  Consume(rep, consume);
+  return node;
+}
+
+CordRepBtree* CordRepBtree::AppendSlow(CordRepBtree* tree, CordRep* rep) {
+  if (ABSL_PREDICT_TRUE(rep->IsBtree())) {
+    return MergeTrees(tree, rep->btree());
+  }
+  auto consume = [&tree](CordRep* r, size_t offset, size_t length) {
+    r = MakeSubstring(r, offset, length);
+    tree = CordRepBtree::AddCordRep<kBack>(tree, r);
+  };
+  Consume(rep, consume);
+  return tree;
+}
+
+CordRepBtree* CordRepBtree::PrependSlow(CordRepBtree* tree, CordRep* rep) {
+  if (ABSL_PREDICT_TRUE(rep->IsBtree())) {
+    return MergeTrees(rep->btree(), tree);
+  }
+  auto consume = [&tree](CordRep* r, size_t offset, size_t length) {
+    r = MakeSubstring(r, offset, length);
+    tree = CordRepBtree::AddCordRep<kFront>(tree, r);
+  };
+  ReverseConsume(rep, consume);
+  return tree;
+}
+
+CordRepBtree* CordRepBtree::Append(CordRepBtree* tree, absl::string_view data,
+                                   size_t extra) {
+  return CordRepBtree::AddData<kBack>(tree, data, extra);
+}
+
+CordRepBtree* CordRepBtree::Prepend(CordRepBtree* tree, absl::string_view data,
+                                    size_t extra) {
+  return CordRepBtree::AddData<kFront>(tree, data, extra);
+}
+
+template CordRepBtree* CordRepBtree::AddCordRep<kFront>(CordRepBtree* tree,
+                                                        CordRep* rep);
+template CordRepBtree* CordRepBtree::AddCordRep<kBack>(CordRepBtree* tree,
+                                                       CordRep* rep);
+template CordRepBtree* CordRepBtree::AddData<kFront>(CordRepBtree* tree,
+                                                     absl::string_view data,
+                                                     size_t extra);
+template CordRepBtree* CordRepBtree::AddData<kBack>(CordRepBtree* tree,
+                                                    absl::string_view data,
+                                                    size_t extra);
+
+void CordRepBtree::Rebuild(CordRepBtree** stack, CordRepBtree* tree,
+                           bool consume) {
+  bool owned = consume && tree->refcount.IsOne();
+  if (tree->height() == 0) {
+    for (CordRep* edge : tree->Edges()) {
+      if (!owned) edge = CordRep::Ref(edge);
+      size_t height = 0;
+      size_t length = edge->length;
+      CordRepBtree* node = stack[0];
+      OpResult result = node->AddEdge<kBack>(true, edge, length);
+      while (result.action == CordRepBtree::kPopped) {
+        stack[height] = result.tree;
+        if (stack[++height] == nullptr) {
+          result.action = CordRepBtree::kSelf;
+          stack[height] = CordRepBtree::New(node, result.tree);
+        } else {
+          node = stack[height];
+          result = node->AddEdge<kBack>(true, result.tree, length);
+        }
+      }
+      while (stack[++height] != nullptr) {
+        stack[height]->length += length;
+      }
+    }
+  } else {
+    for (CordRep* rep : tree->Edges()) {
+      Rebuild(stack, rep->btree(), owned);
+    }
+  }
+  if (consume) {
+    if (owned) {
+      CordRepBtree::Delete(tree);
+    } else {
+      CordRepBtree::Unref(tree);
+    }
+  }
+}
+
+CordRepBtree* CordRepBtree::Rebuild(CordRepBtree* tree) {
+  // Set up initial stack with empty leaf node.
+  CordRepBtree* node = CordRepBtree::New();
+  CordRepBtree* stack[CordRepBtree::kMaxDepth + 1] = {node};
+
+  // Recursively build the tree, consuming the input tree.
+  Rebuild(stack, tree, /* consume reference */ true);
+
+  // Return the topmost node.
+  for (CordRepBtree* parent : stack) {
+    if (parent == nullptr) return node;
+    node = parent;
+  }
+
+  // Unreachable
+  assert(false);
+  return nullptr;
+}
+
+CordRepBtree::ExtractResult CordRepBtree::ExtractAppendBuffer(
+    CordRepBtree* tree, size_t extra_capacity) {
+  int depth = 0;
+  NodeStack stack;
+
+  // Set up default 'no success' result which is {tree, nullptr}.
+  ExtractResult result;
+  result.tree = tree;
+  result.extracted = nullptr;
+
+  // Dive down the right side of the tree, making sure no edges are shared.
+  while (tree->height() > 0) {
+    if (!tree->refcount.IsOne()) return result;
+    stack[depth++] = tree;
+    tree = tree->Edge(kBack)->btree();
+  }
+  if (!tree->refcount.IsOne()) return result;
+
+  // Validate that we ended on a non-shared flat.
+  CordRep* rep = tree->Edge(kBack);
+  if (!(rep->IsFlat() && rep->refcount.IsOne())) return result;
+
+  // Verify it has at least the requested extra capacity.
+  CordRepFlat* flat = rep->flat();
+  const size_t length = flat->length;
+  const size_t avail = flat->Capacity() - flat->length;
+  if (extra_capacity > avail) return result;
+
+  // Set the extracted flat in the result.
+  result.extracted = flat;
+
+  // Cascading delete of all nodes that become empty.
+  while (tree->size() == 1) {
+    CordRepBtree::Delete(tree);
+    if (--depth < 0) {
+      // We consumed the entire tree: return nullptr for new tree.
+      result.tree = nullptr;
+      return result;
+    }
+    rep = tree;
+    tree = stack[depth];
+  }
+
+  // Remove the edge or cascaded up parent node.
+  tree->set_end(tree->end() - 1);
+  tree->length -= length;
+
+  // Adjust lengths up the tree.
+  while (depth > 0) {
+    tree = stack[--depth];
+    tree->length -= length;
+  }
+
+  // Remove unnecessary top nodes with size = 1. This may iterate all the way
+  // down to the leaf node in which case we simply return the remaining last
+  // edge in that node and the extracted flat.
+  while (tree->size() == 1) {
+    int height = tree->height();
+    rep = tree->Edge(kBack);
+    Delete(tree);
+    if (height == 0) {
+      // We consumed the leaf: return the sole data edge as the new tree.
+      result.tree = rep;
+      return result;
+    }
+    tree = rep->btree();
+  }
+
+  // Done: return the (new) top level node and extracted flat.
+  result.tree = tree;
+  return result;
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree.h b/abseil-cpp/absl/strings/internal/cord_rep_btree.h
new file mode 100644
index 0000000..be94b62
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree.h
@@ -0,0 +1,944 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
+
+#include <cassert>
+#include <cstdint>
+#include <iosfwd>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// `SetCordBtreeExhaustiveValidation()` can be used to force exhaustive
+// validation in debug assertions and in code that calls `IsValid()`
+// explicitly. By default, assertions should be relatively cheap and
+// AssertValid() can easily lead to O(n^2) complexity as recursive / full tree
+// validation is O(n).
+void SetCordBtreeExhaustiveValidation(bool do_exaustive_validation);
+bool IsCordBtreeExhaustiveValidationEnabled();
+
+class CordRepBtreeNavigator;
+
+// CordRepBtree is, as the name implies, a btree implementation of a CordRep
+// tree. Data is stored at the leaf level only; non-leaf nodes contain down
+// pointers only. Allowed types of data edges are FLAT, EXTERNAL and
+// SUBSTRINGs of FLAT
+// or EXTERNAL nodes. The implementation allows for data to be added to either
+// end of the tree only; it does not provide any 'insert' logic. This has the
+// benefit that we can expect good fill ratios: all nodes except the outer
+// 'legs' will have 100% fill ratios for trees built using Append/Prepend
+// methods. Merged trees will typically have a fill ratio well above 50% as in a
+// similar fashion, one side of the merged tree will typically have a 100% fill
+// ratio, and the 'open' end will average 50%. All operations are O(log(n)) or
+// better, and the tree never needs balancing.
+//
+// All methods accepting a CordRep* or CordRepBtree* adopt a reference on that
+// input unless explicitly stated otherwise. All functions returning a CordRep*
+// or CordRepBtree* instance transfer a reference back to the caller.
+// Simplified, callers both 'donate' and 'consume' a reference count on each
+// call, simplifying the API. An example of building a tree:
+//
+//   CordRepBtree* tree = CordRepBtree::Create(MakeFlat("Hello"));
+//   tree = CordRepBtree::Append(tree, MakeFlat("world"));
+//
+// In the above example, all inputs are consumed, making each call affecting
+// `tree` reference count neutral. The returned `tree` value can be different
+// from the input if the input is shared with other threads, or if the tree
+// grows in height, but callers typically never have to concern themselves with
+// that and trust that all methods DTRT at all times.
+class CordRepBtree : public CordRep {
+ public:
+  // EdgeType identifies `front` and `back` enum values.
+  // Various implementations in CordRepBtree such as `Add` and `Edge` are
+  // generic and templated on operating on either of the boundary edges.
+  // For more information on the possible edges contained in a CordRepBtree
+  // instance see the documentation for `edges_`.
+  enum class EdgeType { kFront, kBack };
+
+  // Convenience constants into `EdgeType`
+  static constexpr EdgeType kFront = EdgeType::kFront;
+  static constexpr EdgeType kBack = EdgeType::kBack;
+
+  // Maximum number of edges: based on experiments and performance data, we can
+  // pick suitable values resulting in optimum cacheline aligned values. The
+  // preferred values are based on 64-bit systems where we aim to align this
+  // class onto 64 bytes, i.e.:  6 = 64 bytes, 14 = 128 bytes, etc.
+  // TODO(b/192061034): experiment with alternative sizes.
+  static constexpr size_t kMaxCapacity = 6;
+
+  // Reasonable maximum height of the btree. We can expect a fill ratio of at
+  // least 50%: trees are always expanded at the front or back. Concatenating
+  // trees will then typically fold at the top most node, where the lower nodes
+  // are at least at capacity on one side of joined inputs. At a lower fill
+  // rate of 4 edges per node, we have capacity for ~16 million leaf nodes.
+  // We will fail / abort if an application ever exceeds this height, which
+  // should be extremely rare (near impossible) and be an indication of an
+  // application error: we do not assume it reasonable for any application to
+  // operate correctly with such monster trees.
+  // Another compelling reason for the number `12` is that any contextual stack
+  // required for navigation or insertion requires 12 words and 12 bytes, which
+  // fits inside 2 cache lines with some room to spare, and is reasonable as a
+  // local stack variable compared to Cord's current near 400 bytes stack use.
+  // The maximum `height` value of a node is then `kMaxDepth - 1` as node height
+  // values start with a value of 0 for leaf nodes.
+  static constexpr size_t kMaxDepth = 12;
+  // See comments on height() for why this is an int and not a size_t.
+  static constexpr int kMaxHeight = static_cast<int>(kMaxDepth - 1);
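+
+  // Illustrative arithmetic (editorial sketch, not upstream text): the
+  // "~16 million" figure above corresponds to 4^12 = 16,777,216, i.e. a
+  // branching factor of 4 across all 12 levels; at the full kMaxCapacity of
+  // 6, the same depth covers 6^12 (roughly 2.2 billion) entries.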
+
+  // `Action` defines the action for unwinding changes done at the btree's leaf
+  // level that need to be propagated up to the parent node(s). Each operation
+  // on a node has an effect / action defined as follows:
+  // - kSelf
+  //   The operation (add / update, etc) was performed directly on the node as
+  //   the node is private to the current thread (i.e.: not shared directly or
+  //   indirectly through a refcount > 1). Changes can be propagated directly to
+  //   all parent nodes as all parent nodes are also then private to the current
+  //   thread.
+  // - kCopied
+  //   The operation (add / update, etc) was performed on a copy of the original
+  //   node, as the node is (potentially) directly or indirectly shared with
+  //   other threads. Changes need to be propagated into the parent nodes where
+  //   the old down pointer must be unreffed and replaced with this new copy.
+  //   Such changes to parent nodes may themselves require a copy if the parent
+  //   node is also shared. A kCopied action can propagate all the way to the
+  //   top node where we then must unref the `tree` input provided by the
+  //   caller, and return the new copy.
+  // - kPopped
+  //   The operation (typically add) could not be satisfied due to insufficient
+  //   capacity in the targeted node, and a new 'leg' was created that needs to
+  //   be added into the parent node. For example, adding a FLAT inside a leaf
+  //   node that is at capacity will create a new leaf node containing that
+  //   FLAT, that needs to be 'popped' up the btree. Such 'pop' actions can
+  //   cascade up the tree if parent nodes are also at capacity. A 'Popped'
+  //   action propagating all the way to the top of the tree will result in
+  //   the tree becoming one level higher than the current tree through a final
+  //   `CordRepBtree::New(tree, popped)` call, resulting in a new top node
+  //   referencing the old tree and the new (fully popped upwards) 'leg'.
+  enum Action { kSelf, kCopied, kPopped };
+
+  // Result of an operation on a node. See the `Action` enum for details.
+  struct OpResult {
+    CordRepBtree* tree;
+    Action action;
+  };
+
+  // Return value of the CopyPrefix and CopySuffix methods which can
+  // return a node or data edge at any height inside the tree.
+  // A height of 0 defines the lowest (leaf) node, a height of -1 identifies
+  // `edge` as being a plain data node: EXTERNAL / FLAT or SUBSTRING thereof.
+  struct CopyResult {
+    CordRep* edge;
+    int height;
+  };
+
+  // Logical position inside a node:
+  // - index: index of the edge.
+  // - n: size or offset value depending on context.
+  struct Position {
+    size_t index;
+    size_t n;
+  };
+
+  // Creates a btree from the given input. Adopts a ref of `rep`.
+  // If the input `rep` is itself a btree, i.e., `IsBtree()`, then this
+  // function immediately returns `rep->btree()`. If the input is a valid data
+  // edge (see IsDataEdge()), then a new leaf node is returned containing `rep`
+  // as the sole data edge. Else, the input is assumed to be a (legacy) concat
+  // tree, and the input is consumed and transformed into a btree().
+  static CordRepBtree* Create(CordRep* rep);
+
+  // Destroys the provided tree. Should only be called by cord internal API's,
+  // typically after a ref_count.Decrement() on the last reference count.
+  static void Destroy(CordRepBtree* tree);
+
+  // Destruction
+  static void Delete(CordRepBtree* tree) { delete tree; }
+
+  // Use CordRep::Unref() as we overload for absl::Span<CordRep* const>.
+  using CordRep::Unref;
+
+  // Unrefs all edges in `edges` which are assumed to be 'likely one'.
+  static void Unref(absl::Span<CordRep* const> edges);
+
+  // Appends / Prepends an existing CordRep instance to this tree.
+  // The below methods accept three types of input:
+  // 1) `rep` is a data node (see `IsDataEdge()` for valid data edges).
+  // `rep` is appended or prepended to this tree 'as is'.
+  // 2) `rep` is a BTREE.
+  // `rep` is merged into `tree` respecting the Append/Prepend order.
+  // 3) `rep` is some other (legacy) type.
+  // `rep` is converted in place and added to `tree`
+  // Requires `tree` and `rep` to be not null.
+  static CordRepBtree* Append(CordRepBtree* tree, CordRep* rep);
+  static CordRepBtree* Prepend(CordRepBtree* tree, CordRep* rep);
+
+  // Append/Prepend the data in `data` to this tree.
+  // The `extra` parameter defines how much extra capacity should be allocated
+  // for any additional FLAT being allocated. This is an optimization hint from
+  // the caller. For example, a caller may need to add 2 string_views of data
+  // "abc" and "defghi" which are not consecutive. The caller can in this case
+  // invoke `AddData(tree, "abc", 6)`, and any newly added flat is allocated
+  // where possible with at least 6 bytes of extra capacity beyond `length`.
+  // This helps avoid the data getting fragmented over multiple flats.
+  // There is no limit on the size of `data`. If `data` cannot be stored inside
+  // a single flat, then the function will iteratively add flats until all data
+  // has been consumed and appended or prepended to the tree.
+  static CordRepBtree* Append(CordRepBtree* tree, string_view data,
+                              size_t extra = 0);
+  static CordRepBtree* Prepend(CordRepBtree* tree, string_view data,
+                               size_t extra = 0);
+
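+  // Illustrative sketch (not upstream documentation) of the `extra` hint
+  // described above, assuming the non-consecutive fragments "abc" and
+  // "defghi" from the example:
+  //
+  //   CordRepBtree* tree = CordRepBtree::Create(MakeFlat("Hello"));
+  //   tree = CordRepBtree::Append(tree, "abc", /*extra=*/6);
+  //   tree = CordRepBtree::Append(tree, "defghi");
+  //
+  // With at least 6 spare bytes reserved when "abc" is copied into a new
+  // flat, "defghi" can typically be appended into that same flat instead of
+  // fragmenting the data over an additional flat.
+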
+  // Returns a new tree, containing `n` bytes of data from this instance
+  // starting at offset `offset`. Where possible, the returned tree shares
+  // (re-uses) data edges and nodes with this instance to minimize the
+  // combined memory footprint of both trees.
+  // Requires `offset + n <= length`. Returns `nullptr` if `n` is zero.
+  CordRep* SubTree(size_t offset, size_t n);
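+
+  // Illustrative sketch (not upstream documentation), reusing the
+  // "Hello" + "world" tree from the class comment above:
+  //
+  //   CordRep* slice = tree->SubTree(3, 5);  // 5 bytes at offset 3: "lowor"
+  //   ...
+  //   CordRep::Unref(slice);  // SubTree shares edges; unref when done.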
+
+  // Removes `n` trailing bytes from `tree`, and returns the resulting tree
+  // or data edge. Returns `tree` if n is zero, and nullptr if n == length.
+  // This function is logically identical to:
+  //   result = tree->SubTree(0, tree->length - n);
+  //   Unref(tree);
+  //   return result;
+  // However, the actual implementation will as much as possible perform 'in
+  // place' modifications on the tree on all nodes and edges that are mutable.
+  // For example, in a fully privately owned tree with the last edge being a
+  // flat of length 12, RemoveSuffix(1) will simply set the length of that data
+  // edge to 11, and reduce the length of all nodes on the edge path by 1.
+  static CordRep* RemoveSuffix(CordRepBtree* tree, size_t n);
+
+  // Returns the character at the given offset.
+  char GetCharacter(size_t offset) const;
+
+  // Returns true if this node holds a single data edge, and if so, sets
+  // `fragment` to reference the contained data. `fragment` is an optional
+  // output parameter and allowed to be null.
+  bool IsFlat(absl::string_view* fragment) const;
+
+  // Returns true if the data of `n` bytes starting at offset `offset`
+  // is contained in a single data edge, and if so, sets fragment to reference
+  // the contained data. `fragment` is an optional output parameter and allowed
+  // to be null.
+  bool IsFlat(size_t offset, size_t n, absl::string_view* fragment) const;
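+
+  // Illustrative sketch (not upstream documentation): probing for a zero-copy
+  // view of a byte range before falling back to a copy. `Process` and
+  // `CopyOut` are hypothetical caller-side helpers:
+  //
+  //   absl::string_view fragment;
+  //   if (tree->IsFlat(offset, n, &fragment)) {
+  //     Process(fragment);                   // range lives in one data edge
+  //   } else {
+  //     Process(CopyOut(tree, offset, n));   // copying fallback
+  //   }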
+
+  // Returns a span (mutable range of bytes) of up to `size` bytes into the
+  // last FLAT data edge inside this tree under the following conditions:
+  // - none of the nodes down into the FLAT node are shared.
+  // - the last data edge in this tree is a non-shared FLAT.
+  // - the referenced FLAT has additional capacity available.
+  // If all these conditions are met, a non-empty span is returned, and the
+  // length of the flat node and involved tree nodes have been increased by
+  // `span.length()`. The caller is responsible for immediately assigning values
+  // to all uninitialized data referenced by the returned span.
+  // Requires `this->refcount.IsOne()`: this function forces the caller to do
+  // this fast path check on the top level node, as this is the most commonly
+  // shared node of a cord tree.
+  Span<char> GetAppendBuffer(size_t size);
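+
+  // Illustrative sketch (not upstream documentation) of typical use when
+  // appending from an `absl::string_view src`:
+  //
+  //   if (tree->refcount.IsOne()) {
+  //     absl::Span<char> buffer = tree->GetAppendBuffer(src.size());
+  //     // Lengths were already grown by buffer.size(), so the span must be
+  //     // filled completely before the tree is used further.
+  //     memcpy(buffer.data(), src.data(), buffer.size());
+  //     src.remove_prefix(buffer.size());
+  //   }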
+
+  // Extracts the right-most data edge from this tree iff:
+  // - the tree and all internal edges to the right-most node are not shared.
+  // - the right-most node is a FLAT node and not shared.
+  // - the right-most node has at least the desired extra capacity.
+  //
+  // Returns {tree, nullptr} if any of the above conditions are not met.
+  // This method effectively removes data from the tree. The intent of this
+  // method is to allow applications appending small string data to use
+  // pre-existing capacity, and add the modified rep back to the tree.
+  //
+  // Simplified, such code would look similar to this:
+  //   void MyTreeBuilder::Append(string_view data) {
+  //     ExtractResult result = CordRepBtree::ExtractAppendBuffer(tree_, 1);
+  //     if (CordRep* rep = result.extracted) {
+  //       size_t available = rep->Capacity() - rep->length;
+  //       size_t n = std::min(data.size(), available);
+  //       memcpy(rep->Data(), data.data(), n);
+  //       rep->length += n;
+  //       data.remove_prefix(n);
+  //       if (!result.tree->IsBtree()) {
+  //         tree_ = CordRepBtree::Create(result.tree);
+  //       }
+  //       tree_ = CordRepBtree::Append(tree_, rep);
+  //     }
+  //     ...
+  //     // Remaining edge in `result.tree`.
+  //   }
+  static ExtractResult ExtractAppendBuffer(CordRepBtree* tree,
+                                           size_t extra_capacity = 1);
+
+  // Returns the `height` of the tree. The height of a tree is limited to
+  // kMaxHeight. `height` is implemented as an `int` as in some places we
+  // use negative (-1) values for 'data edges'.
+  int height() const { return static_cast<int>(storage[0]); }
+
+  // Properties: begin, back, end, front/back boundary indexes.
+  size_t begin() const { return static_cast<size_t>(storage[1]); }
+  size_t back() const { return static_cast<size_t>(storage[2]) - 1; }
+  size_t end() const { return static_cast<size_t>(storage[2]); }
+  size_t index(EdgeType edge) const {
+    return edge == kFront ? begin() : back();
+  }
+
+  // Properties: size and capacity.
+  // `capacity` contains the current capacity of this instance, where
+  // `kMaxCapacity` contains the maximum capacity of a btree node.
+  // For now, `capacity` and `kMaxCapacity` return the same value, but this may
+  // change in the future if we see benefit in dynamically sizing 'small' nodes
+  // to 'large' nodes for large data trees.
+  size_t size() const { return end() - begin(); }
+  size_t capacity() const { return kMaxCapacity; }
+
+  // Edge access
+  inline CordRep* Edge(size_t index) const;
+  inline CordRep* Edge(EdgeType edge_type) const;
+  inline absl::Span<CordRep* const> Edges() const;
+  inline absl::Span<CordRep* const> Edges(size_t begin, size_t end) const;
+
+  // Returns reference to the data edge at `index`.
+  // Requires this instance to be a leaf node, and `index` to be a valid index.
+  inline absl::string_view Data(size_t index) const;
+
+  // Diagnostics: returns true if `tree` is valid and internally consistent.
+  // If `shallow` is false, then the provided top level node and all child nodes
+  // below it are recursively checked. If `shallow` is true, only the provided
+  // node in `tree` and the cumulative length, type and height of the direct
+  // child nodes of `tree` are checked. The value of `shallow` is ignored if the
+  // internal `cord_btree_exhaustive_validation` diagnostics variable is true,
+  // in which case the performed validation works as if `shallow` were false.
+  // This function is intended for debugging and testing purposes only.
+  static bool IsValid(const CordRepBtree* tree, bool shallow = false);
+
+  // Diagnostics: asserts that the provided tree is valid.
+  // `AssertValid()` performs a shallow validation by default. `shallow` can be
+  // set to false in which case an exhaustive validation is performed. This
+  // function is implemented in terms of calling `IsValid()` and asserting the
+  // return value to be true. See `IsValid()` for more information.
+  // This function is intended for debugging and testing purposes only.
+  static CordRepBtree* AssertValid(CordRepBtree* tree, bool shallow = true);
+  static const CordRepBtree* AssertValid(const CordRepBtree* tree,
+                                         bool shallow = true);
+
+  // Diagnostics: dump the contents of this tree to `stream`.
+  // This function is intended for debugging and testing purposes only.
+  static void Dump(const CordRep* rep, std::ostream& stream);
+  static void Dump(const CordRep* rep, absl::string_view label,
+                   std::ostream& stream);
+  static void Dump(const CordRep* rep, absl::string_view label,
+                   bool include_contents, std::ostream& stream);
+
+  // Adds the edge `edge` to this node if possible. `owned` indicates if the
+  // current node is potentially shared or not with other threads. Returns:
+  // - {kSelf, <this>}
+  //   The edge was directly added to this node.
+  // - {kCopied, <node>}
+  //   The edge was added to a copy of this node.
+  // - {kPopped, New(edge, height())}
+  //   A new leg with the edge was created as this node has no extra capacity.
+  template <EdgeType edge_type>
+  inline OpResult AddEdge(bool owned, CordRep* edge, size_t delta);
+
+  // Replaces the front or back edge with the provided new edge. Returns:
+  // - {kSelf, <this>}
+  //   The edge was directly set in this node. The old edge is unreffed.
+  // - {kCopied, <node>}
+  //   A copy of this node was created with the new edge value.
+  // In both cases, the function adopts a reference on `edge`.
+  template <EdgeType edge_type>
+  OpResult SetEdge(bool owned, CordRep* edge, size_t delta);
+
+  // Creates a new empty node at the specified height.
+  static CordRepBtree* New(int height = 0);
+
+  // Creates a new node containing `rep`, with the height being computed
+  // automatically based on the type of `rep`.
+  static CordRepBtree* New(CordRep* rep);
+
+  // Creates a new node containing both `front` and `back` at height
+  // `front.height() + 1`. Requires `back.height() == front.height()`.
+  static CordRepBtree* New(CordRepBtree* front, CordRepBtree* back);
+
+  // Creates a fully balanced tree from the provided tree by rebuilding a new
+  // tree from all data edges in the input. This function is automatically
+  // invoked internally when the tree exceeds the maximum height.
+  static CordRepBtree* Rebuild(CordRepBtree* tree);
+
+ private:
+  CordRepBtree() = default;
+  ~CordRepBtree() = default;
+
+  // Initializes the main properties `tag`, `begin`, `end`, `height`.
+  inline void InitInstance(int height, size_t begin = 0, size_t end = 0);
+
+  // Direct property access begin / end
+  void set_begin(size_t begin) { storage[1] = static_cast<uint8_t>(begin); }
+  void set_end(size_t end) { storage[2] = static_cast<uint8_t>(end); }
+
+  // Decreases the value of `begin` by `n`, and returns the new value. Notice
+  // how this returns the new value unlike atomic::fetch_add which returns the
+  // old value. This is because this is used to prepend edges at 'begin - 1'.
+  size_t sub_fetch_begin(size_t n) {
+    storage[1] -= static_cast<uint8_t>(n);
+    return storage[1];
+  }
+
+  // Increases the value of `end` by `n`, and returns the previous value. This
+  // function is typically used to append edges at 'end'.
+  size_t fetch_add_end(size_t n) {
+    const uint8_t current = storage[2];
+    storage[2] = static_cast<uint8_t>(current + n);
+    return current;
+  }
+
+  // Returns the index of the last edge starting on, or before `offset`, with
+  // `n` containing the relative offset of `offset` inside that edge.
+  // Requires `offset` < length.
+  Position IndexOf(size_t offset) const;
+
+  // Returns the index of the last edge starting before `offset`, with `n`
+  // containing the relative offset of `offset` inside that edge.
+  // This function is useful to find the edges for some span of bytes ending at
+  // `offset` (i.e., `n` bytes). For example:
+  //
+  //   Position pos = IndexBefore(n)
+  //   edges = Edges(begin(), pos.index)     // All full edges (may be empty)
+  //   last = Sub(Edge(pos.index), 0, pos.n) // Last partial edge (may be empty)
+  //
+  // Requires 0 < `offset` <= length.
+  Position IndexBefore(size_t offset) const;
+
+  // Returns the index of the edge ending at (or on) length `length`, and the
+  // number of bytes inside that edge up to `length`. For example, if we have a
+  // Node with 2 edges, one of 10 and one of 20 long, then IndexOfLength(27)
+  // will return {1, 17}, and IndexOfLength(10) will return {0, 10}.
+  Position IndexOfLength(size_t n) const;
+
+  // Identical to the above function except starting from the position `front`.
+  // This function is equivalent to `IndexBefore(front.n + offset)`, with
+  // the difference that this function is optimized to start at `front.index`.
+  Position IndexBefore(Position front, size_t offset) const;
+
+  // Returns the index of the edge directly beyond the edge containing offset
+  // `offset`, with `n` containing the distance of that edge from `offset`.
+  // This function is useful for iteratively finding suffix nodes and remaining
+  // partial bytes in left-most suffix nodes as for example in CopySuffix.
+  // Requires `offset` < length.
+  Position IndexBeyond(size_t offset) const;
+
+  // Creates a new leaf node containing as much data as possible from `data`.
+  // The data is added either forwards or reversed depending on `edge_type`.
+  // Callers must check the length of the returned node to determine if all data
+  // was copied or not.
+  // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+  template <EdgeType edge_type>
+  static CordRepBtree* NewLeaf(absl::string_view data, size_t extra);
+
+  // Creates a raw copy of this Btree node with the specified length, copying
+  // all properties, but without adding any references to existing edges.
+  CordRepBtree* CopyRaw(size_t new_length) const;
+
+  // Creates a full copy of this Btree node, adding a reference on all edges.
+  CordRepBtree* Copy() const;
+
+  // Creates a partial copy of this Btree node, copying all edges up to `end`,
+  // adding a reference on each copied edge, and sets the length of the newly
+  // created copy to `new_length`.
+  CordRepBtree* CopyBeginTo(size_t end, size_t new_length) const;
+
+  // Returns a tree containing the edges [tree->begin(), end) and length
+  // of `new_length`. This method consumes a reference on the provided
+  // tree, and logically performs the following operation:
+  //   result = tree->CopyBeginTo(end, new_length);
+  //   CordRep::Unref(tree);
+  //   return result;
+  static CordRepBtree* ConsumeBeginTo(CordRepBtree* tree, size_t end,
+                                      size_t new_length);
+
+  // Creates a partial copy of this Btree node, copying all edges starting at
+  // `begin`, adding a reference on each copied edge, and sets the length of
+  // the newly created copy to `new_length`.
+  CordRepBtree* CopyToEndFrom(size_t begin, size_t new_length) const;
+
+  // Extracts and returns the front edge from the provided tree.
+  // This method consumes a reference on the provided tree, and logically
+  // performs the following operation:
+  //   edge = CordRep::Ref(tree->Edge(kFront));
+  //   CordRep::Unref(tree);
+  //   return edge;
+  static CordRep* ExtractFront(CordRepBtree* tree);
+
+  // Returns a tree containing the result of appending `right` to `left`.
+  static CordRepBtree* MergeTrees(CordRepBtree* left, CordRepBtree* right);
+
+  // Fallback functions for `Create()`, `Append()` and `Prepend()` which
+  // deal with legacy / non conforming input, i.e.: CONCAT trees.
+  static CordRepBtree* CreateSlow(CordRep* rep);
+  static CordRepBtree* AppendSlow(CordRepBtree*, CordRep* rep);
+  static CordRepBtree* PrependSlow(CordRepBtree*, CordRep* rep);
+
+  // Recursively rebuilds `tree` into `stack`. If 'consume` is set to true, the
+  // function will consume a reference on `tree`. `stack` is a null terminated
+  // array containing the new tree's state, with the current leaf node at
+  // stack[0], and parent nodes above that, or null for 'top of tree'.
+  static void Rebuild(CordRepBtree** stack, CordRepBtree* tree, bool consume);
+
+  // Aligns existing edges to start at index 0, to allow for a new edge to be
+  // added to the back of the current edges.
+  inline void AlignBegin();
+
+  // Aligns existing edges to end at `capacity`, to allow for a new edge to be
+  // added in front of the current edges.
+  inline void AlignEnd();
+
+  // Adds the provided edge to this node.
+  // Requires this node to have capacity for the edge. Realigns / moves
+  // existing edges as needed to prepend or append the new edge.
+  template <EdgeType edge_type>
+  inline void Add(CordRep* rep);
+
+  // Adds the provided edges to this node.
+  // Requires this node to have capacity for the edges. Realigns / moves
+  // existing edges as needed to prepend or append the new edges.
+  template <EdgeType edge_type>
+  inline void Add(absl::Span<CordRep* const>);
+
+  // Adds data from `data` to this node until either all data has been consumed,
+  // or there is no more capacity for additional flat nodes inside this node.
+  // Requires the current node to be a leaf node, data to be non empty, and the
+  // current node to have capacity for at least one more data edge.
+  // Returns any remaining data from `data` that was not added, which,
+  // depending on the edge type (front / back), is either the remaining prefix
+  // or suffix of the input.
+  // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+  template <EdgeType edge_type>
+  absl::string_view AddData(absl::string_view data, size_t extra);
+
+  // Replace the front or back edge with the provided value.
+  // Adopts a reference on `edge` and unrefs the old edge.
+  template <EdgeType edge_type>
+  inline void SetEdge(CordRep* edge);
+
+  // Returns a partial copy of the current tree containing the first `n` bytes
+  // of data. `CopyResult` contains both the resulting edge and its height. The
+  // resulting tree may be less high than the current tree, or even be a single
+  // matching data edge if `allow_folding` is set to true.
+  // For example, if `n == 1`, then the result will be the single data edge, and
+  // height will be set to -1 (one below the owning leaf node). If n == 0, this
+  // function returns null. Requires `n <= length`
+  CopyResult CopyPrefix(size_t n, bool allow_folding = true);
+
+  // Returns a partial copy of the current tree containing all data starting
+  // after `offset`. `CopyResult` contains both the resulting edge and its
+  // height. The resulting tree may be less high than the current tree, or even
+  // be a single matching data edge. For example, if `n == length - 1`, then the
+  // result will be a single data edge, and height will be set to -1 (one below
+  // the owning leaf node).
+  // Requires `offset < length`
+  CopyResult CopySuffix(size_t offset);
+
+  // Returns an OpResult value of {this, kSelf} or {Copy(), kCopied}
+  // depending on the value of `owned`.
+  inline OpResult ToOpResult(bool owned);
+
+  // Adds `rep` to the specified tree, returning the modified tree.
+  template <EdgeType edge_type>
+  static CordRepBtree* AddCordRep(CordRepBtree* tree, CordRep* rep);
+
+  // Adds `data` to the specified tree, returning the modified tree.
+  // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+  template <EdgeType edge_type>
+  static CordRepBtree* AddData(CordRepBtree* tree, absl::string_view data,
+                               size_t extra = 0);
+
+  // Merges `src` into `dst` with `src` being added either before (kFront) or
+  // after (kBack) `dst`. Requires the height of `dst` to be greater than or
+  // equal to the height of `src`.
+  template <EdgeType edge_type>
+  static CordRepBtree* Merge(CordRepBtree* dst, CordRepBtree* src);
+
+  // Fallback version of GetAppendBuffer for large trees: GetAppendBuffer()
+  // implements an inlined version for trees of limited height (3 levels),
+  // GetAppendBufferSlow implements the logic for large trees.
+  Span<char> GetAppendBufferSlow(size_t size);
+
+  // `edges_` contains all edges starting from this instance.
+  // These are explicitly `child` edges only, a cord btree (or any cord tree in
+  // that respect) does not store `parent` pointers anywhere: multiple trees /
+  // parents can reference the same shared child edge. The type of these edges
+  // depends on the height of the node. `Leaf nodes` (height == 0) contain `data
+  // edges` (external or flat nodes, or sub-strings thereof). All other nodes
+  // (height > 0) contain pointers to BTREE nodes with a height of `height - 1`.
+  CordRep* edges_[kMaxCapacity];
+
+  friend class CordRepBtreeTestPeer;
+  friend class CordRepBtreeNavigator;
+};
+
+inline CordRepBtree* CordRep::btree() {
+  assert(IsBtree());
+  return static_cast<CordRepBtree*>(this);
+}
+
+inline const CordRepBtree* CordRep::btree() const {
+  assert(IsBtree());
+  return static_cast<const CordRepBtree*>(this);
+}
+
+inline void CordRepBtree::InitInstance(int height, size_t begin, size_t end) {
+  tag = BTREE;
+  storage[0] = static_cast<uint8_t>(height);
+  storage[1] = static_cast<uint8_t>(begin);
+  storage[2] = static_cast<uint8_t>(end);
+}
+
+inline CordRep* CordRepBtree::Edge(size_t index) const {
+  assert(index >= begin());
+  assert(index < end());
+  return edges_[index];
+}
+
+inline CordRep* CordRepBtree::Edge(EdgeType edge_type) const {
+  return edges_[edge_type == kFront ? begin() : back()];
+}
+
+inline absl::Span<CordRep* const> CordRepBtree::Edges() const {
+  return {edges_ + begin(), size()};
+}
+
+inline absl::Span<CordRep* const> CordRepBtree::Edges(size_t begin,
+                                                      size_t end) const {
+  assert(begin <= end);
+  assert(begin >= this->begin());
+  assert(end <= this->end());
+  return {edges_ + begin, static_cast<size_t>(end - begin)};
+}
+
+inline absl::string_view CordRepBtree::Data(size_t index) const {
+  assert(height() == 0);
+  return EdgeData(Edge(index));
+}
+
+inline CordRepBtree* CordRepBtree::New(int height) {
+  CordRepBtree* tree = new CordRepBtree;
+  tree->length = 0;
+  tree->InitInstance(height);
+  return tree;
+}
+
+inline CordRepBtree* CordRepBtree::New(CordRep* rep) {
+  CordRepBtree* tree = new CordRepBtree;
+  int height = rep->IsBtree() ? rep->btree()->height() + 1 : 0;
+  tree->length = rep->length;
+  tree->InitInstance(height, /*begin=*/0, /*end=*/1);
+  tree->edges_[0] = rep;
+  return tree;
+}
+
+inline CordRepBtree* CordRepBtree::New(CordRepBtree* front,
+                                       CordRepBtree* back) {
+  assert(front->height() == back->height());
+  CordRepBtree* tree = new CordRepBtree;
+  tree->length = front->length + back->length;
+  tree->InitInstance(front->height() + 1, /*begin=*/0, /*end=*/2);
+  tree->edges_[0] = front;
+  tree->edges_[1] = back;
+  return tree;
+}
+
+inline void CordRepBtree::Unref(absl::Span<CordRep* const> edges) {
+  for (CordRep* edge : edges) {
+    if (ABSL_PREDICT_FALSE(!edge->refcount.Decrement())) {
+      CordRep::Destroy(edge);
+    }
+  }
+}
+
+inline CordRepBtree* CordRepBtree::CopyRaw(size_t new_length) const {
+  CordRepBtree* tree = new CordRepBtree;
+
+  // `length` and `refcount` are the first members of `CordRepBtree`.
+  // We initialize `length` using the given length, have `refcount` be set to
+  // ref = 1 through its default constructor, and copy all data beyond
+  // 'refcount' which starts with `tag` using a single memcpy: all contents
+  // except `refcount` is trivially copyable, and the compiler does not
+  // efficiently coalesce member-wise copy of these members.
+  // See https://gcc.godbolt.org/z/qY8zsca6z
+  // # LINT.IfChange(copy_raw)
+  tree->length = new_length;
+  uint8_t* dst = &tree->tag;
+  const uint8_t* src = &tag;
+  const ptrdiff_t offset = src - reinterpret_cast<const uint8_t*>(this);
+  memcpy(dst, src, sizeof(CordRepBtree) - static_cast<size_t>(offset));
+  return tree;
+  // # LINT.ThenChange()
+}
+
+inline CordRepBtree* CordRepBtree::Copy() const {
+  CordRepBtree* tree = CopyRaw(length);
+  for (CordRep* rep : Edges()) CordRep::Ref(rep);
+  return tree;
+}
+
+inline CordRepBtree* CordRepBtree::CopyToEndFrom(size_t begin,
+                                                 size_t new_length) const {
+  assert(begin >= this->begin());
+  assert(begin <= this->end());
+  CordRepBtree* tree = CopyRaw(new_length);
+  tree->set_begin(begin);
+  for (CordRep* edge : tree->Edges()) CordRep::Ref(edge);
+  return tree;
+}
+
+inline CordRepBtree* CordRepBtree::CopyBeginTo(size_t end,
+                                               size_t new_length) const {
+  assert(end <= capacity());
+  assert(end >= this->begin());
+  CordRepBtree* tree = CopyRaw(new_length);
+  tree->set_end(end);
+  for (CordRep* edge : tree->Edges()) CordRep::Ref(edge);
+  return tree;
+}
+
+inline void CordRepBtree::AlignBegin() {
+  // The below code itself does not need to be fast as typically we have
+  // mono-directional append/prepend calls, and `begin` / `end` are typically
+  // adjusted no more than once. But we want to avoid potential register clobber
+  // effects, making the compiler emit register save/store/spills, and minimize
+  // the size of code.
+  const size_t delta = begin();
+  if (ABSL_PREDICT_FALSE(delta != 0)) {
+    const size_t new_end = end() - delta;
+    set_begin(0);
+    set_end(new_end);
+    // TODO(mvels): we can write this using 2 loads / 2 stores depending on
+    // total size for the kMaxCapacity = 6 case. I.e., we can branch (switch) on
+    // size, and then do overlapping load/store of up to 4 pointers (inlined as
+    // XMM, YMM or ZMM load/store) and up to 2 pointers (XMM / YMM), which is a)
+    // compact and b) not clobbering any registers.
+    ABSL_ASSUME(new_end <= kMaxCapacity);
+#ifdef __clang__
+#pragma unroll 1
+#endif
+    for (size_t i = 0; i < new_end; ++i) {
+      edges_[i] = edges_[i + delta];
+    }
+  }
+}
+
+inline void CordRepBtree::AlignEnd() {
+  // See comments in `AlignBegin` for motivation on the hand-rolled for loops.
+  const size_t delta = capacity() - end();
+  if (delta != 0) {
+    const size_t new_begin = begin() + delta;
+    const size_t new_end = end() + delta;
+    set_begin(new_begin);
+    set_end(new_end);
+    ABSL_ASSUME(new_end <= kMaxCapacity);
+#ifdef __clang__
+#pragma unroll 1
+#endif
+    for (size_t i = new_end - 1; i >= new_begin; --i) {
+      edges_[i] = edges_[i - delta];
+    }
+  }
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kBack>(CordRep* rep) {
+  AlignBegin();
+  edges_[fetch_add_end(1)] = rep;
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kBack>(
+    absl::Span<CordRep* const> edges) {
+  AlignBegin();
+  size_t new_end = end();
+  for (CordRep* edge : edges) edges_[new_end++] = edge;
+  set_end(new_end);
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kFront>(CordRep* rep) {
+  AlignEnd();
+  edges_[sub_fetch_begin(1)] = rep;
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kFront>(
+    absl::Span<CordRep* const> edges) {
+  AlignEnd();
+  size_t new_begin = begin() - edges.size();
+  set_begin(new_begin);
+  for (CordRep* edge : edges) edges_[new_begin++] = edge;
+}
+
+template <CordRepBtree::EdgeType edge_type>
+inline void CordRepBtree::SetEdge(CordRep* edge) {
+  const int idx = edge_type == kFront ? begin() : back();
+  CordRep::Unref(edges_[idx]);
+  edges_[idx] = edge;
+}
+
+inline CordRepBtree::OpResult CordRepBtree::ToOpResult(bool owned) {
+  return owned ? OpResult{this, kSelf} : OpResult{Copy(), kCopied};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexOf(size_t offset) const {
+  assert(offset < length);
+  size_t index = begin();
+  while (offset >= edges_[index]->length) offset -= edges_[index++]->length;
+  return {index, offset};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexBefore(size_t offset) const {
+  assert(offset > 0);
+  assert(offset <= length);
+  size_t index = begin();
+  while (offset > edges_[index]->length) offset -= edges_[index++]->length;
+  return {index, offset};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexBefore(Position front,
+                                                        size_t offset) const {
+  size_t index = front.index;
+  offset = offset + front.n;
+  while (offset > edges_[index]->length) offset -= edges_[index++]->length;
+  return {index, offset};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexOfLength(size_t n) const {
+  assert(n <= length);
+  size_t index = back();
+  size_t strip = length - n;
+  while (strip >= edges_[index]->length) strip -= edges_[index--]->length;
+  return {index, edges_[index]->length - strip};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexBeyond(
+    const size_t offset) const {
+  // We need to find the first edge whose starting offset is at or beyond
+  // (>=) `offset`.
+  // For this we can't use the `offset -= length` logic of IndexOf. Instead, we
+  // track the offset of the `current edge` in `off`, which we increase as we
+  // iterate over the edges until we find the matching edge.
+  size_t off = 0;
+  size_t index = begin();
+  while (offset > off) off += edges_[index++]->length;
+  return {index, off - offset};
+}
+
+inline CordRepBtree* CordRepBtree::Create(CordRep* rep) {
+  if (IsDataEdge(rep)) return New(rep);
+  return CreateSlow(rep);
+}
+
+inline Span<char> CordRepBtree::GetAppendBuffer(size_t size) {
+  assert(refcount.IsOne());
+  CordRepBtree* tree = this;
+  const int height = this->height();
+  CordRepBtree* n1 = tree;
+  CordRepBtree* n2 = tree;
+  CordRepBtree* n3 = tree;
+  switch (height) {
+    case 3:
+      tree = tree->Edge(kBack)->btree();
+      if (!tree->refcount.IsOne()) return {};
+      n2 = tree;
+      ABSL_FALLTHROUGH_INTENDED;
+    case 2:
+      tree = tree->Edge(kBack)->btree();
+      if (!tree->refcount.IsOne()) return {};
+      n1 = tree;
+      ABSL_FALLTHROUGH_INTENDED;
+    case 1:
+      tree = tree->Edge(kBack)->btree();
+      if (!tree->refcount.IsOne()) return {};
+      ABSL_FALLTHROUGH_INTENDED;
+    case 0:
+      CordRep* edge = tree->Edge(kBack);
+      if (!edge->refcount.IsOne()) return {};
+      if (edge->tag < FLAT) return {};
+      size_t avail = edge->flat()->Capacity() - edge->length;
+      if (avail == 0) return {};
+      size_t delta = (std::min)(size, avail);
+      Span<char> span = {edge->flat()->Data() + edge->length, delta};
+      edge->length += delta;
+      switch (height) {
+        case 3:
+          n3->length += delta;
+          ABSL_FALLTHROUGH_INTENDED;
+        case 2:
+          n2->length += delta;
+          ABSL_FALLTHROUGH_INTENDED;
+        case 1:
+          n1->length += delta;
+          ABSL_FALLTHROUGH_INTENDED;
+        case 0:
+          tree->length += delta;
+          return span;
+      }
+      break;
+  }
+  return GetAppendBufferSlow(size);
+}
+
+extern template CordRepBtree* CordRepBtree::AddCordRep<CordRepBtree::kBack>(
+    CordRepBtree* tree, CordRep* rep);
+
+extern template CordRepBtree* CordRepBtree::AddCordRep<CordRepBtree::kFront>(
+    CordRepBtree* tree, CordRep* rep);
+
+inline CordRepBtree* CordRepBtree::Append(CordRepBtree* tree, CordRep* rep) {
+  if (ABSL_PREDICT_TRUE(IsDataEdge(rep))) {
+    return CordRepBtree::AddCordRep<kBack>(tree, rep);
+  }
+  return AppendSlow(tree, rep);
+}
+
+inline CordRepBtree* CordRepBtree::Prepend(CordRepBtree* tree, CordRep* rep) {
+  if (ABSL_PREDICT_TRUE(IsDataEdge(rep))) {
+    return CordRepBtree::AddCordRep<kFront>(tree, rep);
+  }
+  return PrependSlow(tree, rep);
+}
+
+#ifdef NDEBUG
+
+inline CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree,
+                                               bool /* shallow */) {
+  return tree;
+}
+
+inline const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree,
+                                                     bool /* shallow */) {
+  return tree;
+}
+
+#endif
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc b/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc
new file mode 100644
index 0000000..6ed20c2
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc
@@ -0,0 +1,187 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_btree_navigator.h"
+
+#include <cassert>
+
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+using ReadResult = CordRepBtreeNavigator::ReadResult;
+
+namespace {
+
+// Returns a `CordRepSubstring` from `rep` starting at `offset` of size `n`.
+// If `rep` is already a `CordRepSubstring` instance, an adjusted instance is
+// created based on the old offset and new offset.
+// Adopts a reference on `rep`. Rep must be a valid data edge. Returns
+// nullptr if `n == 0`, `rep` if `n == rep->length`.
+// Requires `offset < rep->length` and `offset + n <= rep->length`.
+// TODO(192061034): move to utility library in internal and optimize for small
+// substrings of larger reps.
+inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
+  assert(n <= rep->length);
+  assert(offset < rep->length);
+  assert(offset <= rep->length - n);
+  assert(IsDataEdge(rep));
+
+  if (n == 0) return nullptr;
+  if (n == rep->length) return CordRep::Ref(rep);
+
+  if (rep->tag == SUBSTRING) {
+    offset += rep->substring()->start;
+    rep = rep->substring()->child;
+  }
+
+  assert(rep->IsExternal() || rep->IsFlat());
+  CordRepSubstring* substring = new CordRepSubstring();
+  substring->length = n;
+  substring->tag = SUBSTRING;
+  substring->start = offset;
+  substring->child = CordRep::Ref(rep);
+  return substring;
+}
+
+inline CordRep* Substring(CordRep* rep, size_t offset) {
+  return Substring(rep, offset, rep->length - offset);
+}
+
+}  // namespace
+
+CordRepBtreeNavigator::Position CordRepBtreeNavigator::Skip(size_t n) {
+  int height = 0;
+  size_t index = index_[0];
+  CordRepBtree* node = node_[0];
+  CordRep* edge = node->Edge(index);
+
+  // Overall logic: Find an edge of at least the length we need to skip.
+  // We consume all edges which are smaller (i.e., must be 100% skipped).
+  // If we exhausted all edges on the current level, we move one level
+  // up the tree, and repeat until we either find the edge, or until we hit
+  // the top of the tree meaning the skip exceeds tree->length.
+  while (n >= edge->length) {
+    n -= edge->length;
+    while (++index == node->end()) {
+      if (++height > height_) return {nullptr, n};
+      node = node_[height];
+      index = index_[height];
+    }
+    edge = node->Edge(index);
+  }
+
+  // If we moved up the tree, descend down to the leaf level, consuming all
+  // edges that must be skipped.
+  while (height > 0) {
+    node = edge->btree();
+    index_[height] = static_cast<uint8_t>(index);
+    node_[--height] = node;
+    index = node->begin();
+    edge = node->Edge(index);
+    while (n >= edge->length) {
+      n -= edge->length;
+      ++index;
+      assert(index != node->end());
+      edge = node->Edge(index);
+    }
+  }
+  index_[0] = static_cast<uint8_t>(index);
+  return {edge, n};
+}
+
+ReadResult CordRepBtreeNavigator::Read(size_t edge_offset, size_t n) {
+  int height = 0;
+  size_t length = edge_offset + n;
+  size_t index = index_[0];
+  CordRepBtree* node = node_[0];
+  CordRep* edge = node->Edge(index);
+  assert(edge_offset < edge->length);
+
+  if (length < edge->length) {
+    return {Substring(edge, edge_offset, n), length};
+  }
+
+  // Similar to 'Skip', we consume all edges that are inside the 'length' of
+  // data that needs to be read. If we exhaust the current level, we move one
+  // level up the tree and repeat until we hit the final edge that must be
+  // (partially) read. We consume all edges into `subtree`.
+  CordRepBtree* subtree = CordRepBtree::New(Substring(edge, edge_offset));
+  size_t subtree_end = 1;
+  do {
+    length -= edge->length;
+    while (++index == node->end()) {
+      index_[height] = static_cast<uint8_t>(index);
+      if (++height > height_) {
+        subtree->set_end(subtree_end);
+        if (length == 0) return {subtree, 0};
+        CordRep::Unref(subtree);
+        return {nullptr, length};
+      }
+      if (length != 0) {
+        subtree->set_end(subtree_end);
+        subtree = CordRepBtree::New(subtree);
+        subtree_end = 1;
+      }
+      node = node_[height];
+      index = index_[height];
+    }
+    edge = node->Edge(index);
+    if (length >= edge->length) {
+      subtree->length += edge->length;
+      subtree->edges_[subtree_end++] = CordRep::Ref(edge);
+    }
+  } while (length >= edge->length);
+  CordRepBtree* tree = subtree;
+  subtree->length += length;
+
+  // If we moved up the tree, descend down to the leaf level, consuming all
+  // edges that must be read, adding 'down' nodes to `subtree`.
+  while (height > 0) {
+    node = edge->btree();
+    index_[height] = static_cast<uint8_t>(index);
+    node_[--height] = node;
+    index = node->begin();
+    edge = node->Edge(index);
+
+    if (length != 0) {
+      CordRepBtree* right = CordRepBtree::New(height);
+      right->length = length;
+      subtree->edges_[subtree_end++] = right;
+      subtree->set_end(subtree_end);
+      subtree = right;
+      subtree_end = 0;
+      while (length >= edge->length) {
+        subtree->edges_[subtree_end++] = CordRep::Ref(edge);
+        length -= edge->length;
+        edge = node->Edge(++index);
+      }
+    }
+  }
+  // Add any (partial) edge still remaining at the leaf level.
+  if (length != 0) {
+    subtree->edges_[subtree_end++] = Substring(edge, 0, length);
+  }
+  subtree->set_end(subtree_end);
+  index_[0] = static_cast<uint8_t>(index);
+  return {tree, length};
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h b/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h
new file mode 100644
index 0000000..3d581c8
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h
@@ -0,0 +1,267 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_
+
+#include <cassert>
+#include <iostream>
+
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepBtreeNavigator is a bi-directional navigator allowing callers to
+// navigate all the (leaf) data edges in a CordRepBtree instance.
+//
+// A CordRepBtreeNavigator instance is by default empty. Callers initialize a
+// navigator instance by calling one of `InitFirst()`, `InitLast()` or
+// `InitOffset()`, which establishes a current position. Callers can then
+// navigate using the `Next`, `Previous`, `Skip` and `Seek` methods.
+//
+// The navigator instance does not take or adopt a reference on the provided
+// `tree` on any of the initialization calls. Callers are responsible for
+// guaranteeing the lifecycle of the provided tree. A navigator instance can
+// be reset to the empty state by calling `Reset`.
+//
+// A navigator only keeps positional state on the 'current data edge'; it
+// explicitly does not keep any 'offset' state. The class does accept and
+// return offsets in the `Read()`, `Skip()` and `Seek()` methods as these
+// would otherwise put a big burden on callers. Callers are expected to
+// maintain (returned) offset info if they require such granular state.
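+//
+// Example: a minimal sketch (illustrative only; `tree` is assumed to be a
+// valid, non-empty CordRepBtree) that visits all data edges front to back
+// and sums their lengths:
+//
+//   CordRepBtreeNavigator nav;
+//   size_t total = 0;
+//   for (CordRep* edge = nav.InitFirst(tree); edge != nullptr;
+//        edge = nav.Next()) {
+//     total += edge->length;
+//   }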
+class CordRepBtreeNavigator {
+ public:
+  // The logical position as returned by the Seek() and Skip() functions.
+  // Returns the current leaf edge for the desired seek or skip position and
+  // the offset of that position inside that edge.
+  struct Position {
+    CordRep* edge;
+    size_t offset;
+  };
+
+  // The read result as returned by the Read() function.
+  // `tree` contains the resulting tree which is identical to the result
+  // of calling CordRepBtree::SubTree(...) on the tree being navigated.
+  // `n` contains the number of bytes used from the last navigated to
+  // edge of the tree.
+  struct ReadResult {
+    CordRep* tree;
+    size_t n;
+  };
+
+  // Returns true if this instance is not empty.
+  explicit operator bool() const;
+
+  // Returns the tree for this instance or nullptr if empty.
+  CordRepBtree* btree() const;
+
+  // Returns the data edge of the current position.
+  // Requires this instance to not be empty.
+  CordRep* Current() const;
+
+  // Resets this navigator to `tree`, returning the first data edge in the tree.
+  CordRep* InitFirst(CordRepBtree* tree);
+
+  // Resets this navigator to `tree`, returning the last data edge in the tree.
+  CordRep* InitLast(CordRepBtree* tree);
+
+  // Resets this navigator to `tree`, returning the data edge at position
+  // `offset` and the relative offset of `offset` into that data edge.
+  // Returns `Position.edge = nullptr` if the provided offset is greater
+  // than or equal to the length of the tree, in which case the state of
+  // the navigator instance remains unchanged.
+  Position InitOffset(CordRepBtree* tree, size_t offset);
+
+  // Navigates to the next data edge.
+  // Returns the next data edge or nullptr if there is no next data edge, in
+  // which case the current position remains unchanged.
+  CordRep* Next();
+
+  // Navigates to the previous data edge.
+  // Returns the previous data edge or nullptr if there is no previous data
+  // edge, in which case the current position remains unchanged.
+  CordRep* Previous();
+
+  // Navigates to the data edge at position `offset`. Returns the navigated to
+  // data edge in `Position.edge` and the relative offset of `offset` into that
+  // data edge in `Position.offset`. Returns `Position.edge = nullptr` if the
+  // provided offset is greater than or equal to the tree's length.
+  Position Seek(size_t offset);
+
+  // Reads `n` bytes of data starting at offset `edge_offset` of the current
+  // data edge, and returns the result in `ReadResult.tree`. `ReadResult.n`
+  // contains the 'bytes used' from the last / current data edge in the tree.
+  // This allows users that mix regular navigation (using string views) and
+  // 'read into cord' navigation to keep track of the current state, and which
+  // bytes have been consumed from a navigator.
+  // This function returns `ReadResult.tree = nullptr` if the requested length
+  // exceeds the length of the tree starting at the current data edge.
+  ReadResult Read(size_t edge_offset, size_t n);
+
+  // Skips `n` bytes forward from the current data edge, returning the navigated
+  // to data edge in `Position.edge` and `Position.offset` containing the offset
+  // inside that data edge. Note that the state of the navigator is left
+  // unchanged if `n` is smaller than the length of the current data edge.
+  Position Skip(size_t n);
+
+  // Resets this instance to the default / empty state.
+  void Reset();
+
+ private:
+  // Slow path for Next() if Next() reached the end of a leaf node. Backtracks
+  // up the stack until it finds a node that has a 'next' position available,
+  // and then does a 'front dive' towards the next leaf node.
+  CordRep* NextUp();
+
+  // Slow path for Previous() if Previous() reached the beginning of a leaf
+  // node. Backtracks up the stack until it finds a node that has a 'previous'
+  // position available, and then does a 'back dive' towards the previous leaf
+  // node.
+  CordRep* PreviousUp();
+
+  // Generic implementation of InitFirst() and InitLast().
+  template <CordRepBtree::EdgeType edge_type>
+  CordRep* Init(CordRepBtree* tree);
+
+  // `height_` contains the height of the current tree, or -1 if empty.
+  int height_ = -1;
+
+  // `index_` and `node_` contain the navigation state as the 'path' to the
+  // current data edge which is at `node_[0]->Edge(index_[0])`. The contents
+  // of these are undefined until the instance is initialized (`height_ >= 0`).
+  uint8_t index_[CordRepBtree::kMaxDepth];
+  CordRepBtree* node_[CordRepBtree::kMaxDepth];
+};
+
+// Returns true if this instance is not empty.
+inline CordRepBtreeNavigator::operator bool() const { return height_ >= 0; }
+
+inline CordRepBtree* CordRepBtreeNavigator::btree() const {
+  return height_ >= 0 ? node_[height_] : nullptr;
+}
+
+inline CordRep* CordRepBtreeNavigator::Current() const {
+  assert(height_ >= 0);
+  return node_[0]->Edge(index_[0]);
+}
+
+inline void CordRepBtreeNavigator::Reset() { height_ = -1; }
+
+inline CordRep* CordRepBtreeNavigator::InitFirst(CordRepBtree* tree) {
+  return Init<CordRepBtree::kFront>(tree);
+}
+
+inline CordRep* CordRepBtreeNavigator::InitLast(CordRepBtree* tree) {
+  return Init<CordRepBtree::kBack>(tree);
+}
+
+template <CordRepBtree::EdgeType edge_type>
+inline CordRep* CordRepBtreeNavigator::Init(CordRepBtree* tree) {
+  assert(tree != nullptr);
+  assert(tree->size() > 0);
+  assert(tree->height() <= CordRepBtree::kMaxHeight);
+  int height = height_ = tree->height();
+  size_t index = tree->index(edge_type);
+  node_[height] = tree;
+  index_[height] = static_cast<uint8_t>(index);
+  while (--height >= 0) {
+    tree = tree->Edge(index)->btree();
+    node_[height] = tree;
+    index = tree->index(edge_type);
+    index_[height] = static_cast<uint8_t>(index);
+  }
+  return node_[0]->Edge(index);
+}
+
+inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::Seek(
+    size_t offset) {
+  assert(btree() != nullptr);
+  int height = height_;
+  CordRepBtree* edge = node_[height];
+  if (ABSL_PREDICT_FALSE(offset >= edge->length)) return {nullptr, 0};
+  CordRepBtree::Position index = edge->IndexOf(offset);
+  index_[height] = static_cast<uint8_t>(index.index);
+  while (--height >= 0) {
+    edge = edge->Edge(index.index)->btree();
+    node_[height] = edge;
+    index = edge->IndexOf(index.n);
+    index_[height] = static_cast<uint8_t>(index.index);
+  }
+  return {edge->Edge(index.index), index.n};
+}
+
+inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::InitOffset(
+    CordRepBtree* tree, size_t offset) {
+  assert(tree != nullptr);
+  assert(tree->height() <= CordRepBtree::kMaxHeight);
+  if (ABSL_PREDICT_FALSE(offset >= tree->length)) return {nullptr, 0};
+  height_ = tree->height();
+  node_[height_] = tree;
+  return Seek(offset);
+}
+
+inline CordRep* CordRepBtreeNavigator::Next() {
+  CordRepBtree* edge = node_[0];
+  return index_[0] == edge->back() ? NextUp() : edge->Edge(++index_[0]);
+}
+
+inline CordRep* CordRepBtreeNavigator::Previous() {
+  CordRepBtree* edge = node_[0];
+  return index_[0] == edge->begin() ? PreviousUp() : edge->Edge(--index_[0]);
+}
+
+inline CordRep* CordRepBtreeNavigator::NextUp() {
+  assert(index_[0] == node_[0]->back());
+  CordRepBtree* edge;
+  size_t index;
+  int height = 0;
+  do {
+    if (++height > height_) return nullptr;
+    edge = node_[height];
+    index = index_[height] + 1;
+  } while (index == edge->end());
+  index_[height] = static_cast<uint8_t>(index);
+  do {
+    node_[--height] = edge = edge->Edge(index)->btree();
+    index_[height] = static_cast<uint8_t>(index = edge->begin());
+  } while (height > 0);
+  return edge->Edge(index);
+}
+
+inline CordRep* CordRepBtreeNavigator::PreviousUp() {
+  assert(index_[0] == node_[0]->begin());
+  CordRepBtree* edge;
+  size_t index;
+  int height = 0;
+  do {
+    if (++height > height_) return nullptr;
+    edge = node_[height];
+    index = index_[height];
+  } while (index == edge->begin());
+  index_[height] = static_cast<uint8_t>(--index);
+  do {
+    node_[--height] = edge = edge->Edge(index)->btree();
+    index_[height] = static_cast<uint8_t>(index = edge->back());
+  } while (height > 0);
+  return edge->Edge(index);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc b/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc
new file mode 100644
index 0000000..bed7550
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc
@@ -0,0 +1,346 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_btree_navigator.h"
+
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::Eq;
+using ::testing::Ne;
+
+using ::absl::cordrep_testing::CordRepBtreeFromFlats;
+using ::absl::cordrep_testing::CordToString;
+using ::absl::cordrep_testing::CreateFlatsFromString;
+using ::absl::cordrep_testing::CreateRandomString;
+using ::absl::cordrep_testing::MakeFlat;
+using ::absl::cordrep_testing::MakeSubstring;
+
+using ReadResult = CordRepBtreeNavigator::ReadResult;
+using Position = CordRepBtreeNavigator::Position;
+
+// CordRepBtreeNavigatorTest is a test fixture which automatically creates a
+// tree to test navigation logic on. The parameter `count` defines the number of
+// data edges in the test tree.
+class CordRepBtreeNavigatorTest : public testing::TestWithParam<size_t> {
+ public:
+  using Flats = std::vector<CordRep*>;
+  static constexpr size_t kCharsPerFlat = 3;
+
+  CordRepBtreeNavigatorTest() {
+    data_ = CreateRandomString(count() * kCharsPerFlat);
+    flats_ = CreateFlatsFromString(data_, kCharsPerFlat);
+
+    // Turn flat 0 or 1 into a substring to cover partial reads on substrings.
+    if (count() > 1) {
+      CordRep::Unref(flats_[1]);
+      flats_[1] = MakeSubstring(kCharsPerFlat, kCharsPerFlat, MakeFlat(data_));
+    } else {
+      CordRep::Unref(flats_[0]);
+      flats_[0] = MakeSubstring(0, kCharsPerFlat, MakeFlat(data_));
+    }
+
+    tree_ = CordRepBtreeFromFlats(flats_);
+  }
+
+  ~CordRepBtreeNavigatorTest() override { CordRep::Unref(tree_); }
+
+  size_t count() const { return GetParam(); }
+  CordRepBtree* tree() { return tree_; }
+  const std::string& data() const { return data_; }
+  const std::vector<CordRep*>& flats() const { return flats_; }
+
+  static std::string ToString(testing::TestParamInfo<size_t> param) {
+    return absl::StrCat(param.param, "_Flats");
+  }
+
+ private:
+  std::string data_;
+  Flats flats_;
+  CordRepBtree* tree_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+    WithParam, CordRepBtreeNavigatorTest,
+    testing::Values(1, CordRepBtree::kMaxCapacity - 1,
+                    CordRepBtree::kMaxCapacity,
+                    CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity - 1,
+                    CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity,
+                    CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity + 1,
+                    CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity * 2 +
+                        17),
+    CordRepBtreeNavigatorTest::ToString);
+
+TEST(CordRepBtreeNavigatorTest, Uninitialized) {
+  CordRepBtreeNavigator nav;
+  EXPECT_FALSE(nav);
+  EXPECT_THAT(nav.btree(), Eq(nullptr));
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+  EXPECT_DEATH(nav.Current(), ".*");
+#endif
+}
+
+TEST_P(CordRepBtreeNavigatorTest, InitFirst) {
+  CordRepBtreeNavigator nav;
+  CordRep* edge = nav.InitFirst(tree());
+  EXPECT_TRUE(nav);
+  EXPECT_THAT(nav.btree(), Eq(tree()));
+  EXPECT_THAT(nav.Current(), Eq(flats().front()));
+  EXPECT_THAT(edge, Eq(flats().front()));
+}
+
+TEST_P(CordRepBtreeNavigatorTest, InitLast) {
+  CordRepBtreeNavigator nav;
+  CordRep* edge = nav.InitLast(tree());
+  EXPECT_TRUE(nav);
+  EXPECT_THAT(nav.btree(), Eq(tree()));
+  EXPECT_THAT(nav.Current(), Eq(flats().back()));
+  EXPECT_THAT(edge, Eq(flats().back()));
+}
+
+TEST_P(CordRepBtreeNavigatorTest, NextPrev) {
+  CordRepBtreeNavigator nav;
+  nav.InitFirst(tree());
+  const Flats& flats = this->flats();
+
+  EXPECT_THAT(nav.Previous(), Eq(nullptr));
+  EXPECT_THAT(nav.Current(), Eq(flats.front()));
+  for (size_t i = 1; i < flats.size(); ++i) {
+    ASSERT_THAT(nav.Next(), Eq(flats[i]));
+    EXPECT_THAT(nav.Current(), Eq(flats[i]));
+  }
+  EXPECT_THAT(nav.Next(), Eq(nullptr));
+  EXPECT_THAT(nav.Current(), Eq(flats.back()));
+  for (size_t i = flats.size() - 1; i > 0; --i) {
+    ASSERT_THAT(nav.Previous(), Eq(flats[i - 1]));
+    EXPECT_THAT(nav.Current(), Eq(flats[i - 1]));
+  }
+  EXPECT_THAT(nav.Previous(), Eq(nullptr));
+  EXPECT_THAT(nav.Current(), Eq(flats.front()));
+}
+
+TEST_P(CordRepBtreeNavigatorTest, PrevNext) {
+  CordRepBtreeNavigator nav;
+  nav.InitLast(tree());
+  const Flats& flats = this->flats();
+
+  EXPECT_THAT(nav.Next(), Eq(nullptr));
+  EXPECT_THAT(nav.Current(), Eq(flats.back()));
+  for (size_t i = flats.size() - 1; i > 0; --i) {
+    ASSERT_THAT(nav.Previous(), Eq(flats[i - 1]));
+    EXPECT_THAT(nav.Current(), Eq(flats[i - 1]));
+  }
+  EXPECT_THAT(nav.Previous(), Eq(nullptr));
+  EXPECT_THAT(nav.Current(), Eq(flats.front()));
+  for (size_t i = 1; i < flats.size(); ++i) {
+    ASSERT_THAT(nav.Next(), Eq(flats[i]));
+    EXPECT_THAT(nav.Current(), Eq(flats[i]));
+  }
+  EXPECT_THAT(nav.Next(), Eq(nullptr));
+  EXPECT_THAT(nav.Current(), Eq(flats.back()));
+}
+
+TEST(CordRepBtreeNavigatorTest, Reset) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
+  CordRepBtreeNavigator nav;
+  nav.InitFirst(tree);
+  nav.Reset();
+  EXPECT_FALSE(nav);
+  EXPECT_THAT(nav.btree(), Eq(nullptr));
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+  EXPECT_DEATH(nav.Current(), ".*");
+#endif
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeNavigatorTest, Skip) {
+  size_t count = this->count();
+  const Flats& flats = this->flats();
+  CordRepBtreeNavigator nav;
+  nav.InitFirst(tree());
+
+  for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
+    Position pos = nav.Skip(char_offset);
+    EXPECT_THAT(pos.edge, Eq(nav.Current()));
+    EXPECT_THAT(pos.edge, Eq(flats[0]));
+    EXPECT_THAT(pos.offset, Eq(char_offset));
+  }
+
+  for (size_t index1 = 0; index1 < count; ++index1) {
+    for (size_t index2 = index1; index2 < count; ++index2) {
+      for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
+        CordRepBtreeNavigator nav;
+        nav.InitFirst(tree());
+
+        size_t length1 = index1 * kCharsPerFlat;
+        Position pos1 = nav.Skip(length1 + char_offset);
+        ASSERT_THAT(pos1.edge, Eq(flats[index1]));
+        ASSERT_THAT(pos1.edge, Eq(nav.Current()));
+        ASSERT_THAT(pos1.offset, Eq(char_offset));
+
+        size_t length2 = index2 * kCharsPerFlat;
+        Position pos2 = nav.Skip(length2 - length1 + char_offset);
+        ASSERT_THAT(pos2.edge, Eq(flats[index2]));
+        ASSERT_THAT(pos2.edge, Eq(nav.Current()));
+        ASSERT_THAT(pos2.offset, Eq(char_offset));
+      }
+    }
+  }
+}
+
+TEST_P(CordRepBtreeNavigatorTest, Seek) {
+  size_t count = this->count();
+  const Flats& flats = this->flats();
+  CordRepBtreeNavigator nav;
+  nav.InitFirst(tree());
+
+  for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
+    Position pos = nav.Seek(char_offset);
+    EXPECT_THAT(pos.edge, Eq(nav.Current()));
+    EXPECT_THAT(pos.edge, Eq(flats[0]));
+    EXPECT_THAT(pos.offset, Eq(char_offset));
+  }
+
+  for (size_t index = 0; index < count; ++index) {
+    for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
+      size_t offset = index * kCharsPerFlat + char_offset;
+      Position pos1 = nav.Seek(offset);
+      ASSERT_THAT(pos1.edge, Eq(flats[index]));
+      ASSERT_THAT(pos1.edge, Eq(nav.Current()));
+      ASSERT_THAT(pos1.offset, Eq(char_offset));
+    }
+  }
+}
+
+TEST(CordRepBtreeNavigatorTest, InitOffset) {
+  // Whitebox: InitOffset() is implemented in terms of Seek(), which is
+  // exhaustively tested. Only test that it initializes / forwards properly.
+  CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
+  tree = CordRepBtree::Append(tree, MakeFlat("def"));
+  CordRepBtreeNavigator nav;
+  Position pos = nav.InitOffset(tree, 5);
+  EXPECT_TRUE(nav);
+  EXPECT_THAT(nav.btree(), Eq(tree));
+  EXPECT_THAT(pos.edge, Eq(tree->Edges()[1]));
+  EXPECT_THAT(pos.edge, Eq(nav.Current()));
+  EXPECT_THAT(pos.offset, Eq(2u));
+  CordRep::Unref(tree);
+}
+
+TEST(CordRepBtreeNavigatorTest, InitOffsetAndSeekBeyondLength) {
+  CordRepBtree* tree1 = CordRepBtree::Create(MakeFlat("abc"));
+  CordRepBtree* tree2 = CordRepBtree::Create(MakeFlat("def"));
+
+  CordRepBtreeNavigator nav;
+  nav.InitFirst(tree1);
+  EXPECT_THAT(nav.Seek(3).edge, Eq(nullptr));
+  EXPECT_THAT(nav.Seek(100).edge, Eq(nullptr));
+  EXPECT_THAT(nav.btree(), Eq(tree1));
+  EXPECT_THAT(nav.Current(), Eq(tree1->Edges().front()));
+
+  EXPECT_THAT(nav.InitOffset(tree2, 3).edge, Eq(nullptr));
+  EXPECT_THAT(nav.InitOffset(tree2, 100).edge, Eq(nullptr));
+  EXPECT_THAT(nav.btree(), Eq(tree1));
+  EXPECT_THAT(nav.Current(), Eq(tree1->Edges().front()));
+
+  CordRep::Unref(tree1);
+  CordRep::Unref(tree2);
+}
+
+TEST_P(CordRepBtreeNavigatorTest, Read) {
+  const Flats& flats = this->flats();
+  const std::string& data = this->data();
+
+  for (size_t offset = 0; offset < data.size(); ++offset) {
+    for (size_t length = 1; length <= data.size() - offset; ++length) {
+      CordRepBtreeNavigator nav;
+      nav.InitFirst(tree());
+
+      // Skip towards edge holding offset
+      size_t edge_offset = nav.Skip(offset).offset;
+
+      // Read node
+      ReadResult result = nav.Read(edge_offset, length);
+      ASSERT_THAT(result.tree, Ne(nullptr));
+      EXPECT_THAT(result.tree->length, Eq(length));
+      if (result.tree->tag == BTREE) {
+        ASSERT_TRUE(CordRepBtree::IsValid(result.tree->btree()));
+      }
+
+      // Verify contents
+      std::string value = CordToString(result.tree);
+      EXPECT_THAT(value, Eq(data.substr(offset, length)));
+
+      // Verify 'partial last edge' reads.
+      size_t partial = (offset + length) % kCharsPerFlat;
+      ASSERT_THAT(result.n, Eq(partial));
+
+      // Verify ending position if not EOF
+      if (offset + length < data.size()) {
+        size_t index = (offset + length) / kCharsPerFlat;
+        EXPECT_THAT(nav.Current(), Eq(flats[index]));
+      }
+
+      CordRep::Unref(result.tree);
+    }
+  }
+}
+
+TEST_P(CordRepBtreeNavigatorTest, ReadBeyondLengthOfTree) {
+  CordRepBtreeNavigator nav;
+  nav.InitFirst(tree());
+  ReadResult result = nav.Read(2, tree()->length);
+  ASSERT_THAT(result.tree, Eq(nullptr));
+}
+
+TEST(CordRepBtreeNavigatorTest, NavigateMaximumTreeDepth) {
+  CordRepFlat* flat1 = MakeFlat("Hello world");
+  CordRepFlat* flat2 = MakeFlat("World Hello");
+
+  CordRepBtree* node = CordRepBtree::Create(flat1);
+  node = CordRepBtree::Append(node, flat2);
+  while (node->height() < CordRepBtree::kMaxHeight) {
+    node = CordRepBtree::New(node);
+  }
+
+  CordRepBtreeNavigator nav;
+  CordRep* edge = nav.InitFirst(node);
+  EXPECT_THAT(edge, Eq(flat1));
+  EXPECT_THAT(nav.Next(), Eq(flat2));
+  EXPECT_THAT(nav.Next(), Eq(nullptr));
+  EXPECT_THAT(nav.Previous(), Eq(flat1));
+  EXPECT_THAT(nav.Previous(), Eq(nullptr));
+
+  CordRep::Unref(node);
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc b/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc
new file mode 100644
index 0000000..0d0e860
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc
@@ -0,0 +1,69 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_btree_reader.h"
+
+#include <cassert>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_btree_navigator.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
+                                           CordRep*& tree) {
+  assert(chunk_size <= navigator_.Current()->length);
+
+  // If chunk_size is non-zero, we need to start inside the last returned edge.
+  // Else we start reading at the next data edge of the tree.
+  CordRep* edge = chunk_size ? navigator_.Current() : navigator_.Next();
+  const size_t offset = chunk_size ? edge->length - chunk_size : 0;
+
+  // Read the sub tree and verify we got what we wanted.
+  ReadResult result = navigator_.Read(offset, n);
+  tree = result.tree;
+
+  // If the data returned in `tree` was covered entirely by `chunk_size`, i.e.,
+  // read from the 'previous' edge, we did not consume any additional data, and
+  // can directly return the substring into the current data edge as the next
+  // chunk. We can easily establish from the above code that `navigator_.Next()`
+  // has not been called as that requires `chunk_size` to be zero.
+  if (n < chunk_size) return EdgeData(edge).substr(result.n);
+
+  // The amount of data taken from the last edge is `chunk_size` and `result.n`
+  // contains the offset into the current edge trailing the read data (which can
+  // be 0). As the call to `navigator_.Read()` could have consumed all remaining
+  // data, calling `navigator_.Current()` is not safe before checking if we
+  // already consumed all remaining data.
+  const size_t consumed_by_read = n - chunk_size - result.n;
+  if (consumed_by_read >= remaining_) {
+    remaining_ = 0;
+    return {};
+  }
+
+  // We did not read all data, return remaining data from current edge.
+  edge = navigator_.Current();
+  remaining_ -= consumed_by_read + edge->length;
+  return EdgeData(edge).substr(result.n);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h b/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h
new file mode 100644
index 0000000..8db8f8d
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h
@@ -0,0 +1,212 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
+
+#include <cassert>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_btree_navigator.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepBtreeReader implements logic to iterate over cord btrees.
+// References to the underlying data are returned as absl::string_view values.
+// The most typical use case is a forward-only iteration over tree data.
+// The class also provides `Skip()`, `Seek()` and `Read()` methods similar to
+// CordRepBtreeNavigator that allow more advanced navigation.
+//
+// Example: iterate over all data inside a cord btree:
+//
+//   CordRepBtreeReader reader;
+//   for (absl::string_view sv = reader.Init(tree); !sv.empty();
+//        sv = reader.Next()) {
+//     DoSomethingWithDataIn(sv);
+//   }
+//
+// All navigation methods always return the next 'chunk' of data. The class
+// assumes that all data is directly 'consumed' by the caller. For example:
+// invoking `Skip()` will skip the desired number of bytes, and directly
+// read and return the next chunk of data directly after the skipped bytes.
+//
+// Example: iterate over all data inside a btree skipping the first 100 bytes:
+//
+//   CordRepBtreeReader reader;
+//   absl::string_view sv = reader.Init(tree);
+//   if (sv.length() > 100) {
+//     sv.remove_prefix(100);
+//   } else {
+//     sv = reader.Skip(100 - sv.length());
+//   }
+//   while (!sv.empty()) {
+//     DoSomethingWithDataIn(sv);
+//     sv = reader.Next();
+//   }
+//
+// It is important to notice that `remaining` is based on the end position of
+// the last data edge returned to the caller, not the cumulative data returned
+// to the caller which can be less in cases of skipping or seeking over data.
+//
+// For example, consider a cord btree with five data edges: "abc", "def", "ghi",
+// "jkl" and "mno":
+//
+//   absl::string_view sv;
+//   CordRepBtreeReader reader;
+//
+//   sv = reader.Init(tree); // sv = "abc", remaining = 12
+//   sv = reader.Skip(4);    // sv = "hi",  remaining = 6
+//   sv = reader.Skip(2);    // sv = "l",   remaining = 3
+//   sv = reader.Next();     // sv = "mno", remaining = 0
+//   sv = reader.Seek(1);    // sv = "bc", remaining = 12
+//
+class CordRepBtreeReader {
+ public:
+  using ReadResult = CordRepBtreeNavigator::ReadResult;
+  using Position = CordRepBtreeNavigator::Position;
+
+  // Returns true if this instance is not empty.
+  explicit operator bool() const { return navigator_.btree() != nullptr; }
+
+  // Returns the tree referenced by this instance or nullptr if empty.
+  CordRepBtree* btree() const { return navigator_.btree(); }
+
+  // Returns the current data edge inside the referenced btree.
+  // Requires that the current instance is not empty.
+  CordRep* node() const { return navigator_.Current(); }
+
+  // Returns the length of the referenced tree.
+  // Requires that the current instance is not empty.
+  size_t length() const;
+
+  // Returns the number of remaining bytes available for iteration, which is the
+  // number of bytes directly following the end of the last chunk returned.
+  // This value will be zero if we iterated over the last edge in the bound
+  // tree, in which case any call to Next() or Skip() will return an empty
+  // string_view reflecting the EOF state.
+  // Note that a call to `Seek()` resets `remaining` to a value based on the
+  // end position of the chunk returned by that call.
+  size_t remaining() const { return remaining_; }
+
+  // Resets this instance to an empty value.
+  void Reset() { navigator_.Reset(); }
+
+  // Initializes this instance with `tree`. `tree` must not be null.
+  // Returns a reference to the first data edge of the provided tree.
+  absl::string_view Init(CordRepBtree* tree);
+
+  // Navigates to and returns the next data edge of the referenced tree.
+  // Returns an empty string_view if an attempt is made to read beyond the end
+  // of the tree, i.e.: if `remaining()` is zero indicating an EOF condition.
+  // Requires that the current instance is not empty.
+  absl::string_view Next();
+
+  // Skips the provided amount of bytes and returns a reference to the data
+  // directly following the skipped bytes.
+  absl::string_view Skip(size_t skip);
+
+  // Reads `n` bytes into `tree`.
+  // If `chunk_size` is zero, starts reading at the next data edge. If
+  // `chunk_size` is non-zero, the read starts at the last `chunk_size` bytes
+  // of the last returned data edge. Effectively, this means that the read
+  // starts `chunk_size` bytes before the end of the last returned chunk.
+  // Requires that `chunk_size` is less than or equal to the length of the
+  // last returned data edge. The purpose of `chunk_size` is to simplify code
+  // partially consuming a returned chunk and wanting to include the remaining
+  // bytes in the Read call. For example, the below code will read 1000 bytes of
+  // data into a cord tree if the first chunk starts with "big:":
+  //
+  //   CordRepBtreeReader reader;
+  //   absl::string_view sv = reader.Init(tree);
+  //   if (absl::StartsWith(sv, "big:")) {
+  //     CordRep* rep;
+  //     sv = reader.Read(1000, sv.size() - 4 /* "big:" */, rep);
+  //   }
+  //
+  // This method will return an empty string view if all remaining data was
+  // read. If `n` exceeds the amount of remaining data, this function will
+  // return an empty string view and `tree` will be set to nullptr.
+  // In both cases, `remaining()` will be zero afterwards.
+  absl::string_view Read(size_t n, size_t chunk_size, CordRep*& tree);
+
+  // Navigates to the chunk at offset `offset`.
+  // Returns a reference into the navigated to chunk, adjusted for the relative
+  // position of `offset` into that chunk. For example, calling `Seek(13)` on a
+  // cord tree containing 2 chunks of 10 and 20 bytes respectively will return
+  // a string view into the second chunk starting at offset 3 with a size of 17.
+  // Returns an empty string view if `offset` is equal to or greater than the
+  // length of the referenced tree.
+  absl::string_view Seek(size_t offset);
+
+ private:
+  size_t remaining_ = 0;
+  CordRepBtreeNavigator navigator_;
+};
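+
+// Illustrative sketch only (hypothetical helper, not part of the Abseil API):
+// consumes all chunks of `tree` front to back and returns the number of bytes
+// seen, which should equal `tree->length`. Assumes `tree` is a valid,
+// non-null CordRepBtree owned by the caller.
+inline size_t CordRepBtreeReaderCountBytesExample(CordRepBtree* tree) {
+  CordRepBtreeReader reader;
+  size_t bytes = 0;
+  // Init() returns the first chunk; Next() returns an empty string_view once
+  // remaining() drops to zero.
+  for (absl::string_view chunk = reader.Init(tree); !chunk.empty();
+       chunk = reader.Next()) {
+    bytes += chunk.size();
+  }
+  return bytes;
+}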
+
+inline size_t CordRepBtreeReader::length() const {
+  assert(btree() != nullptr);
+  return btree()->length;
+}
+
+inline absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) {
+  assert(tree != nullptr);
+  const CordRep* edge = navigator_.InitFirst(tree);
+  remaining_ = tree->length - edge->length;
+  return EdgeData(edge);
+}
+
+inline absl::string_view CordRepBtreeReader::Next() {
+  if (remaining_ == 0) return {};
+  const CordRep* edge = navigator_.Next();
+  assert(edge != nullptr);
+  remaining_ -= edge->length;
+  return EdgeData(edge);
+}
+
+inline absl::string_view CordRepBtreeReader::Skip(size_t skip) {
+  // As we are always positioned on the last 'consumed' edge, we
+  // need to skip the current edge as well as `skip`.
+  const size_t edge_length = navigator_.Current()->length;
+  CordRepBtreeNavigator::Position pos = navigator_.Skip(skip + edge_length);
+  if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) {
+    remaining_ = 0;
+    return {};
+  }
+  // The combined length of all edges skipped before `pos.edge` is `skip -
+  // pos.offset`, all of which are 'consumed', as well as the current edge.
+  remaining_ -= skip - pos.offset + pos.edge->length;
+  return EdgeData(pos.edge).substr(pos.offset);
+}
+
+inline absl::string_view CordRepBtreeReader::Seek(size_t offset) {
+  const CordRepBtreeNavigator::Position pos = navigator_.Seek(offset);
+  if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) {
+    remaining_ = 0;
+    return {};
+  }
+  absl::string_view chunk = EdgeData(pos.edge).substr(pos.offset);
+  remaining_ = length() - offset - chunk.length();
+  return chunk;
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc b/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc
new file mode 100644
index 0000000..b4cdd8e
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc
@@ -0,0 +1,293 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_btree_reader.h"
+
+#include <iostream>
+#include <random>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::Ne;
+using ::testing::Not;
+
+using ::absl::cordrep_testing::CordRepBtreeFromFlats;
+using ::absl::cordrep_testing::MakeFlat;
+using ::absl::cordrep_testing::CordToString;
+using ::absl::cordrep_testing::CreateFlatsFromString;
+using ::absl::cordrep_testing::CreateRandomString;
+
+using ReadResult = CordRepBtreeReader::ReadResult;
+
+TEST(CordRepBtreeReaderTest, Next) {
+  constexpr size_t kChars = 3;
+  const size_t cap = CordRepBtree::kMaxCapacity;
+  size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
+
+  for (size_t count : counts) {
+    std::string data = CreateRandomString(count * kChars);
+    std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
+    CordRepBtree* node = CordRepBtreeFromFlats(flats);
+
+    CordRepBtreeReader reader;
+    size_t remaining = data.length();
+    absl::string_view chunk = reader.Init(node);
+    EXPECT_THAT(chunk, Eq(data.substr(0, chunk.length())));
+
+    remaining -= chunk.length();
+    EXPECT_THAT(reader.remaining(), Eq(remaining));
+
+    while (remaining > 0) {
+      const size_t offset = data.length() - remaining;
+      chunk = reader.Next();
+      EXPECT_THAT(chunk, Eq(data.substr(offset, chunk.length())));
+
+      remaining -= chunk.length();
+      EXPECT_THAT(reader.remaining(), Eq(remaining));
+    }
+
+    EXPECT_THAT(reader.remaining(), Eq(0u));
+
+    // Verify trying to read beyond EOF returns empty string_view
+    EXPECT_THAT(reader.Next(), testing::IsEmpty());
+
+    CordRep::Unref(node);
+  }
+}
+
+TEST(CordRepBtreeReaderTest, Skip) {
+  constexpr size_t kChars = 3;
+  const size_t cap = CordRepBtree::kMaxCapacity;
+  size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
+
+  for (size_t count : counts) {
+    std::string data = CreateRandomString(count * kChars);
+    std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
+    CordRepBtree* node = CordRepBtreeFromFlats(flats);
+
+    for (size_t skip1 = 0; skip1 < data.length() - kChars; ++skip1) {
+      for (size_t skip2 = 0; skip2 < data.length() - kChars; ++skip2) {
+        CordRepBtreeReader reader;
+        size_t remaining = data.length();
+        absl::string_view chunk = reader.Init(node);
+        remaining -= chunk.length();
+
+        chunk = reader.Skip(skip1);
+        size_t offset = data.length() - remaining;
+        ASSERT_THAT(chunk, Eq(data.substr(offset + skip1, chunk.length())));
+        remaining -= chunk.length() + skip1;
+        ASSERT_THAT(reader.remaining(), Eq(remaining));
+
+        if (remaining == 0) continue;
+
+        size_t skip = std::min(remaining - 1, skip2);
+        chunk = reader.Skip(skip);
+        offset = data.length() - remaining;
+        ASSERT_THAT(chunk, Eq(data.substr(offset + skip, chunk.length())));
+      }
+    }
+
+    CordRep::Unref(node);
+  }
+}
+
+TEST(CordRepBtreeReaderTest, SkipBeyondLength) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
+  tree = CordRepBtree::Append(tree, MakeFlat("def"));
+  CordRepBtreeReader reader;
+  reader.Init(tree);
+  EXPECT_THAT(reader.Skip(100), IsEmpty());
+  EXPECT_THAT(reader.remaining(), Eq(0u));
+  CordRep::Unref(tree);
+}
+
+TEST(CordRepBtreeReaderTest, Seek) {
+  constexpr size_t kChars = 3;
+  const size_t cap = CordRepBtree::kMaxCapacity;
+  size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
+
+  for (size_t count : counts) {
+    std::string data = CreateRandomString(count * kChars);
+    std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
+    CordRepBtree* node = CordRepBtreeFromFlats(flats);
+
+    for (size_t seek = 0; seek < data.length() - 1; ++seek) {
+      CordRepBtreeReader reader;
+      reader.Init(node);
+      absl::string_view chunk = reader.Seek(seek);
+      ASSERT_THAT(chunk, Not(IsEmpty()));
+      ASSERT_THAT(chunk, Eq(data.substr(seek, chunk.length())));
+      ASSERT_THAT(reader.remaining(),
+                  Eq(data.length() - seek - chunk.length()));
+    }
+
+    CordRep::Unref(node);
+  }
+}
+
+TEST(CordRepBtreeReaderTest, SeekBeyondLength) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
+  tree = CordRepBtree::Append(tree, MakeFlat("def"));
+  CordRepBtreeReader reader;
+  reader.Init(tree);
+  EXPECT_THAT(reader.Seek(6), IsEmpty());
+  EXPECT_THAT(reader.remaining(), Eq(0u));
+  EXPECT_THAT(reader.Seek(100), IsEmpty());
+  EXPECT_THAT(reader.remaining(), Eq(0u));
+  CordRep::Unref(tree);
+}
+
+TEST(CordRepBtreeReaderTest, Read) {
+  std::string data = "abcdefghijklmno";
+  std::vector<CordRep*> flats = CreateFlatsFromString(data, 5);
+  CordRepBtree* node = CordRepBtreeFromFlats(flats);
+
+  CordRep* tree;
+  CordRepBtreeReader reader;
+  absl::string_view chunk;
+
+  // Read zero bytes
+  chunk = reader.Init(node);
+  chunk = reader.Read(0, chunk.length(), tree);
+  EXPECT_THAT(tree, Eq(nullptr));
+  EXPECT_THAT(chunk, Eq("abcde"));
+  EXPECT_THAT(reader.remaining(), Eq(10u));
+  EXPECT_THAT(reader.Next(), Eq("fghij"));
+
+  // Read in full
+  chunk = reader.Init(node);
+  chunk = reader.Read(15, chunk.length(), tree);
+  EXPECT_THAT(tree, Ne(nullptr));
+  EXPECT_THAT(CordToString(tree), Eq("abcdefghijklmno"));
+  EXPECT_THAT(chunk, Eq(""));
+  EXPECT_THAT(reader.remaining(), Eq(0u));
+  CordRep::Unref(tree);
+
+  // Read < chunk bytes
+  chunk = reader.Init(node);
+  chunk = reader.Read(3, chunk.length(), tree);
+  ASSERT_THAT(tree, Ne(nullptr));
+  EXPECT_THAT(CordToString(tree), Eq("abc"));
+  EXPECT_THAT(chunk, Eq("de"));
+  EXPECT_THAT(reader.remaining(), Eq(10u));
+  EXPECT_THAT(reader.Next(), Eq("fghij"));
+  CordRep::Unref(tree);
+
+  // Read < chunk bytes at offset
+  chunk = reader.Init(node);
+  chunk = reader.Read(2, chunk.length() - 2, tree);
+  ASSERT_THAT(tree, Ne(nullptr));
+  EXPECT_THAT(CordToString(tree), Eq("cd"));
+  EXPECT_THAT(chunk, Eq("e"));
+  EXPECT_THAT(reader.remaining(), Eq(10u));
+  EXPECT_THAT(reader.Next(), Eq("fghij"));
+  CordRep::Unref(tree);
+
+  // Read from consumed chunk
+  chunk = reader.Init(node);
+  chunk = reader.Read(3, 0, tree);
+  ASSERT_THAT(tree, Ne(nullptr));
+  EXPECT_THAT(CordToString(tree), Eq("fgh"));
+  EXPECT_THAT(chunk, Eq("ij"));
+  EXPECT_THAT(reader.remaining(), Eq(5u));
+  EXPECT_THAT(reader.Next(), Eq("klmno"));
+  CordRep::Unref(tree);
+
+  // Read across chunks
+  chunk = reader.Init(node);
+  chunk = reader.Read(12, chunk.length() - 2, tree);
+  ASSERT_THAT(tree, Ne(nullptr));
+  EXPECT_THAT(CordToString(tree), Eq("cdefghijklmn"));
+  EXPECT_THAT(chunk, Eq("o"));
+  EXPECT_THAT(reader.remaining(), Eq(0u));
+  CordRep::Unref(tree);
+
+  // Read across chunks landing on exact edge boundary
+  chunk = reader.Init(node);
+  chunk = reader.Read(10 - 2, chunk.length() - 2, tree);
+  ASSERT_THAT(tree, Ne(nullptr));
+  EXPECT_THAT(CordToString(tree), Eq("cdefghij"));
+  EXPECT_THAT(chunk, Eq("klmno"));
+  EXPECT_THAT(reader.remaining(), Eq(0u));
+  CordRep::Unref(tree);
+
+  CordRep::Unref(node);
+}
+
+TEST(CordRepBtreeReaderTest, ReadExhaustive) {
+  constexpr size_t kChars = 3;
+  const size_t cap = CordRepBtree::kMaxCapacity;
+  size_t counts[] = {1, 2, cap, cap * cap + 1, cap * cap * cap * 2 + 17};
+
+  for (size_t count : counts) {
+    std::string data = CreateRandomString(count * kChars);
+    std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
+    CordRepBtree* node = CordRepBtreeFromFlats(flats);
+
+    for (size_t read_size : {kChars - 1, kChars, kChars + 7, cap * cap}) {
+      CordRepBtreeReader reader;
+      absl::string_view chunk = reader.Init(node);
+
+      // `consumed` tracks the end of the last consumed chunk (the start of
+      // the next chunk): we always read with `chunk_size = chunk.length()`.
+      size_t consumed = 0;
+      size_t remaining = data.length();
+      while (remaining > 0) {
+        CordRep* tree;
+        size_t n = (std::min)(remaining, read_size);
+        chunk = reader.Read(n, chunk.length(), tree);
+        EXPECT_THAT(tree, Ne(nullptr));
+        if (tree) {
+          EXPECT_THAT(CordToString(tree), Eq(data.substr(consumed, n)));
+          CordRep::Unref(tree);
+        }
+
+        consumed += n;
+        remaining -= n;
+        EXPECT_THAT(reader.remaining(), Eq(remaining - chunk.length()));
+
+        if (remaining > 0) {
+          ASSERT_FALSE(chunk.empty());
+          ASSERT_THAT(chunk, Eq(data.substr(consumed, chunk.length())));
+        } else {
+          ASSERT_TRUE(chunk.empty()) << chunk;
+        }
+      }
+    }
+
+    CordRep::Unref(node);
+  }
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc b/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc
new file mode 100644
index 0000000..840acf9
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc
@@ -0,0 +1,1568 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_btree.h"
+
+#include <cmath>
+#include <deque>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/cleanup/cleanup.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+class CordRepBtreeTestPeer {
+ public:
+  static void SetEdge(CordRepBtree* node, size_t idx, CordRep* edge) {
+    node->edges_[idx] = edge;
+  }
+  static void AddEdge(CordRepBtree* node, CordRep* edge) {
+    node->edges_[node->fetch_add_end(1)] = edge;
+  }
+};
+
+namespace {
+
+using ::absl::cordrep_testing::AutoUnref;
+using ::absl::cordrep_testing::CordCollectRepsIf;
+using ::absl::cordrep_testing::CordToString;
+using ::absl::cordrep_testing::CordVisitReps;
+using ::absl::cordrep_testing::CreateFlatsFromString;
+using ::absl::cordrep_testing::CreateRandomString;
+using ::absl::cordrep_testing::MakeExternal;
+using ::absl::cordrep_testing::MakeFlat;
+using ::absl::cordrep_testing::MakeSubstring;
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::AnyOf;
+using ::testing::Conditional;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::HasSubstr;
+using ::testing::Le;
+using ::testing::Ne;
+using ::testing::Not;
+using ::testing::SizeIs;
+using ::testing::TypedEq;
+
+MATCHER_P(EqFlatHolding, data, "Equals flat holding data") {
+  if (arg->tag < FLAT) {
+    *result_listener << "Expected FLAT, got tag " << static_cast<int>(arg->tag);
+    return false;
+  }
+  std::string actual = CordToString(arg);
+  if (actual != data) {
+    *result_listener << "Expected flat holding \"" << data
+                     << "\", got flat holding \"" << actual << "\"";
+    return false;
+  }
+  return true;
+}
+
+MATCHER_P(IsNode, height, absl::StrCat("Is a valid node of height ", height)) {
+  if (arg == nullptr) {
+    *result_listener << "Expected NODE, got nullptr";
+    return false;
+  }
+  if (arg->tag != BTREE) {
+    *result_listener << "Expected NODE, got " << static_cast<int>(arg->tag);
+    return false;
+  }
+  if (!CordRepBtree::IsValid(arg->btree())) {
+    CordRepBtree::Dump(arg->btree(), "Expected valid NODE, got:", false,
+                       *result_listener->stream());
+    return false;
+  }
+  if (arg->btree()->height() != height) {
+    *result_listener << "Expected NODE of height " << height << ", got "
+                     << arg->btree()->height();
+    return false;
+  }
+  return true;
+}
+
+MATCHER_P2(IsSubstring, start, length,
+           absl::StrCat("Is a substring(start = ", start, ", length = ", length,
+                        ")")) {
+  if (arg == nullptr) {
+    *result_listener << "Expected substring, got nullptr";
+    return false;
+  }
+  if (arg->tag != SUBSTRING) {
+    *result_listener << "Expected SUBSTRING, got "
+                     << static_cast<int>(arg->tag);
+    return false;
+  }
+  const CordRepSubstring* const substr = arg->substring();
+  if (substr->start != start || substr->length != length) {
+    *result_listener << "Expected substring(" << start << ", " << length
+                     << "), got substring(" << substr->start << ", "
+                     << substr->length << ")";
+    return false;
+  }
+  return true;
+}
+
+MATCHER_P2(EqExtractResult, tree, rep, "Equals ExtractResult") {
+  if (arg.tree != tree || arg.extracted != rep) {
+    *result_listener << "Expected {" << static_cast<const void*>(tree) << ", "
+                     << static_cast<const void*>(rep) << "}, got {" << arg.tree
+                     << ", " << arg.extracted << "}";
+    return false;
+  }
+  return true;
+}
+
+// DataConsumer is a simple helper class used by tests to 'consume' string
+// fragments from the provided input in forward or backward direction.
+class DataConsumer {
+ public:
+  // Starts consumption of `data`. Caller must make sure `data` outlives this
+  // instance. Consumes data starting at the front if `forward` is true, else
+  // consumes data from the back.
+  DataConsumer(absl::string_view data, bool forward)
+      : data_(data), forward_(forward) {}
+
+  // Return the next `n` bytes from referenced data.
+  absl::string_view Next(size_t n) {
+    assert(n <= data_.size() - consumed_);
+    consumed_ += n;
+    return data_.substr(forward_ ? consumed_ - n : data_.size() - consumed_, n);
+  }
+
+  // Returns all data consumed so far.
+  absl::string_view Consumed() const {
+    return forward_ ? data_.substr(0, consumed_)
+                    : data_.substr(data_.size() - consumed_);
+  }
+
+ private:
+  absl::string_view data_;
+  size_t consumed_ = 0;
+  bool forward_;
+};
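+
+// Illustrative sketch (hypothetical example test, not from the original
+// suite): demonstrates backward consumption. Consuming "abcdef" in reverse
+// yields the trailing fragments first, and Consumed() reports the suffix
+// consumed so far.
+TEST(DataConsumerExample, BackwardConsumption) {
+  DataConsumer consumer("abcdef", /*forward=*/false);
+  EXPECT_THAT(consumer.Next(2), Eq("ef"));
+  EXPECT_THAT(consumer.Next(2), Eq("cd"));
+  EXPECT_THAT(consumer.Consumed(), Eq("cdef"));
+}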
+
+// BtreeAdd returns either CordRepBtree::Append or CordRepBtree::Prepend.
+CordRepBtree* BtreeAdd(CordRepBtree* node, bool append,
+                       absl::string_view data) {
+  return append ? CordRepBtree::Append(node, data)
+                : CordRepBtree::Prepend(node, data);
+}
+
+// Recursively collects all leaf edges from `tree` and appends them to `edges`.
+void GetLeafEdges(const CordRepBtree* tree, std::vector<CordRep*>& edges) {
+  if (tree->height() == 0) {
+    for (CordRep* edge : tree->Edges()) {
+      edges.push_back(edge);
+    }
+  } else {
+    for (CordRep* edge : tree->Edges()) {
+      GetLeafEdges(edge->btree(), edges);
+    }
+  }
+}
+
+// Recursively collects and returns all leaf edges from `tree`.
+std::vector<CordRep*> GetLeafEdges(const CordRepBtree* tree) {
+  std::vector<CordRep*> edges;
+  GetLeafEdges(tree, edges);
+  return edges;
+}
+
+// Creates a flat containing the hexadecimal value of `i`, zero-padded
+// to at least 4 digits and prefixed with "0x", e.g.: "0x04AC".
+CordRepFlat* MakeHexFlat(size_t i) {
+  return MakeFlat(absl::StrCat("0x", absl::Hex(i, absl::kZeroPad4)));
+}
+
+CordRepBtree* MakeLeaf(size_t size = CordRepBtree::kMaxCapacity) {
+  assert(size <= CordRepBtree::kMaxCapacity);
+  CordRepBtree* leaf = CordRepBtree::Create(MakeHexFlat(0));
+  for (size_t i = 1; i < size; ++i) {
+    leaf = CordRepBtree::Append(leaf, MakeHexFlat(i));
+  }
+  return leaf;
+}
+
+CordRepBtree* MakeTree(size_t size, bool append = true) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeHexFlat(0));
+  for (size_t i = 1; i < size; ++i) {
+    tree = append ? CordRepBtree::Append(tree, MakeHexFlat(i))
+                  : CordRepBtree::Prepend(tree, MakeHexFlat(i));
+  }
+  return tree;
+}
+
+CordRepBtree* CreateTree(absl::Span<CordRep* const> reps) {
+  auto it = reps.begin();
+  CordRepBtree* tree = CordRepBtree::Create(*it);
+  while (++it != reps.end()) tree = CordRepBtree::Append(tree, *it);
+  return tree;
+}
+
+CordRepBtree* CreateTree(absl::string_view data, size_t chunk_size) {
+  return CreateTree(CreateFlatsFromString(data, chunk_size));
+}
+
+CordRepBtree* CreateTreeReverse(absl::string_view data, size_t chunk_size) {
+  std::vector<CordRep*> flats = CreateFlatsFromString(data, chunk_size);
+  auto rit = flats.rbegin();
+  CordRepBtree* tree = CordRepBtree::Create(*rit);
+  while (++rit != flats.rend()) tree = CordRepBtree::Prepend(tree, *rit);
+  return tree;
+}
+
+class CordRepBtreeTest : public testing::TestWithParam<bool> {
+ public:
+  bool shared() const { return GetParam(); }
+
+  static std::string ToString(testing::TestParamInfo<bool> param) {
+    return param.param ? "Shared" : "Private";
+  }
+};
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeTest, testing::Bool(),
+                         CordRepBtreeTest::ToString);
+
+class CordRepBtreeHeightTest : public testing::TestWithParam<int> {
+ public:
+  int height() const { return GetParam(); }
+
+  static std::string ToString(testing::TestParamInfo<int> param) {
+    return absl::StrCat(param.param);
+  }
+};
+
+INSTANTIATE_TEST_SUITE_P(WithHeights, CordRepBtreeHeightTest,
+                         testing::Range(0, CordRepBtree::kMaxHeight),
+                         CordRepBtreeHeightTest::ToString);
+
+using TwoBools = testing::tuple<bool, bool>;
+
+class CordRepBtreeDualTest : public testing::TestWithParam<TwoBools> {
+ public:
+  bool first_shared() const { return std::get<0>(GetParam()); }
+  bool second_shared() const { return std::get<1>(GetParam()); }
+
+  static std::string ToString(testing::TestParamInfo<TwoBools> param) {
+    if (std::get<0>(param.param)) {
+      return std::get<1>(param.param) ? "BothShared" : "FirstShared";
+    }
+    return std::get<1>(param.param) ? "SecondShared" : "Private";
+  }
+};
+
+INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeDualTest,
+                         testing::Combine(testing::Bool(), testing::Bool()),
+                         CordRepBtreeDualTest::ToString);
+
+TEST(CordRepBtreeTest, SizeIsMultipleOf64) {
+  // Only enforce for fully 64-bit platforms.
+  if (sizeof(size_t) == 8 && sizeof(void*) == 8) {
+    EXPECT_THAT(sizeof(CordRepBtree) % 64, Eq(0u))
+        << "Should be multiple of 64";
+  }
+}
+
+TEST(CordRepBtreeTest, NewDestroyEmptyTree) {
+  auto* tree = CordRepBtree::New();
+  EXPECT_THAT(tree->size(), Eq(0u));
+  EXPECT_THAT(tree->height(), Eq(0));
+  EXPECT_THAT(tree->Edges(), ElementsAre());
+  CordRepBtree::Destroy(tree);
+}
+
+TEST(CordRepBtreeTest, NewDestroyEmptyTreeAtHeight) {
+  auto* tree = CordRepBtree::New(3);
+  EXPECT_THAT(tree->size(), Eq(0u));
+  EXPECT_THAT(tree->height(), Eq(3));
+  EXPECT_THAT(tree->Edges(), ElementsAre());
+  CordRepBtree::Destroy(tree);
+}
+
+TEST(CordRepBtreeTest, Btree) {
+  CordRep* rep = CordRepBtree::New();
+  EXPECT_THAT(rep->btree(), Eq(rep));
+  EXPECT_THAT(static_cast<const CordRep*>(rep)->btree(), Eq(rep));
+  CordRep::Unref(rep);
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+  rep = MakeFlat("Hello world");
+  EXPECT_DEATH(rep->btree(), ".*");
+  EXPECT_DEATH(static_cast<const CordRep*>(rep)->btree(), ".*");
+  CordRep::Unref(rep);
+#endif
+}
+
+TEST(CordRepBtreeTest, EdgeData) {
+  CordRepFlat* flat = MakeFlat("Hello world");
+  CordRepExternal* external = MakeExternal("Hello external");
+  CordRep* substr1 = MakeSubstring(1, 6, CordRep::Ref(flat));
+  CordRep* substr2 = MakeSubstring(1, 6, CordRep::Ref(external));
+  CordRep* bad_substr = MakeSubstring(1, 2, CordRep::Ref(substr1));
+
+  EXPECT_TRUE(IsDataEdge(flat));
+  EXPECT_THAT(EdgeData(flat).data(), TypedEq<const void*>(flat->Data()));
+  EXPECT_THAT(EdgeData(flat), Eq("Hello world"));
+
+  EXPECT_TRUE(IsDataEdge(external));
+  EXPECT_THAT(EdgeData(external).data(), TypedEq<const void*>(external->base));
+  EXPECT_THAT(EdgeData(external), Eq("Hello external"));
+
+  EXPECT_TRUE(IsDataEdge(substr1));
+  EXPECT_THAT(EdgeData(substr1).data(), TypedEq<const void*>(flat->Data() + 1));
+  EXPECT_THAT(EdgeData(substr1), Eq("ello w"));
+
+  EXPECT_TRUE(IsDataEdge(substr2));
+  EXPECT_THAT(EdgeData(substr2).data(),
+              TypedEq<const void*>(external->base + 1));
+  EXPECT_THAT(EdgeData(substr2), Eq("ello e"));
+
+  EXPECT_FALSE(IsDataEdge(bad_substr));
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+  EXPECT_DEATH(EdgeData(bad_substr), ".*");
+#endif
+
+  CordRep::Unref(bad_substr);
+  CordRep::Unref(substr2);
+  CordRep::Unref(substr1);
+  CordRep::Unref(external);
+  CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, CreateUnrefLeaf) {
+  auto* flat = MakeFlat("a");
+  auto* leaf = CordRepBtree::Create(flat);
+  EXPECT_THAT(leaf->size(), Eq(1u));
+  EXPECT_THAT(leaf->height(), Eq(0));
+  EXPECT_THAT(leaf->Edges(), ElementsAre(flat));
+  CordRepBtree::Unref(leaf);
+}
+
+TEST(CordRepBtreeTest, NewUnrefNode) {
+  auto* leaf = CordRepBtree::Create(MakeFlat("a"));
+  CordRepBtree* tree = CordRepBtree::New(leaf);
+  EXPECT_THAT(tree->size(), Eq(1u));
+  EXPECT_THAT(tree->height(), Eq(1));
+  EXPECT_THAT(tree->Edges(), ElementsAre(leaf));
+  CordRepBtree::Unref(tree);
+}
+
+TEST_P(CordRepBtreeTest, AppendToLeafToCapacity) {
+  AutoUnref refs;
+  std::vector<CordRep*> flats;
+  flats.push_back(MakeHexFlat(0));
+  auto* leaf = CordRepBtree::Create(flats.back());
+
+  for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
+    refs.RefIf(shared(), leaf);
+    flats.push_back(MakeHexFlat(i));
+    auto* result = CordRepBtree::Append(leaf, flats.back());
+    EXPECT_THAT(result->height(), Eq(0));
+    EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
+    EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
+    leaf = result;
+  }
+  CordRep::Unref(leaf);
+}
+
+TEST_P(CordRepBtreeTest, PrependToLeafToCapacity) {
+  AutoUnref refs;
+  std::deque<CordRep*> flats;
+  flats.push_front(MakeHexFlat(0));
+  auto* leaf = CordRepBtree::Create(flats.front());
+
+  for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
+    refs.RefIf(shared(), leaf);
+    flats.push_front(MakeHexFlat(i));
+    auto* result = CordRepBtree::Prepend(leaf, flats.front());
+    EXPECT_THAT(result->height(), Eq(0));
+    EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
+    EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
+    leaf = result;
+  }
+  CordRep::Unref(leaf);
+}
+
+// This test specifically targets the code that aligns data at either the front
+// or the back of the contained `edges[]` array; alternating Append and Prepend
+// moves the `begin()` and `end()` values as needed for each added value.
+TEST_P(CordRepBtreeTest, AppendPrependToLeafToCapacity) {
+  AutoUnref refs;
+  std::deque<CordRep*> flats;
+  flats.push_front(MakeHexFlat(0));
+  auto* leaf = CordRepBtree::Create(flats.front());
+
+  for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
+    refs.RefIf(shared(), leaf);
+    CordRepBtree* result;
+    if (i % 2 != 0) {
+      flats.push_front(MakeHexFlat(i));
+      result = CordRepBtree::Prepend(leaf, flats.front());
+    } else {
+      flats.push_back(MakeHexFlat(i));
+      result = CordRepBtree::Append(leaf, flats.back());
+    }
+    EXPECT_THAT(result->height(), Eq(0));
+    EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
+    EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
+    leaf = result;
+  }
+  CordRep::Unref(leaf);
+}
+
+TEST_P(CordRepBtreeTest, AppendToLeafBeyondCapacity) {
+  AutoUnref refs;
+  auto* leaf = MakeLeaf();
+  refs.RefIf(shared(), leaf);
+  CordRep* flat = MakeFlat("abc");
+  auto* result = CordRepBtree::Append(leaf, flat);
+  ASSERT_THAT(result, IsNode(1));
+  EXPECT_THAT(result, Ne(leaf));
+  absl::Span<CordRep* const> edges = result->Edges();
+  ASSERT_THAT(edges, ElementsAre(leaf, IsNode(0)));
+  EXPECT_THAT(edges[1]->btree()->Edges(), ElementsAre(flat));
+  CordRep::Unref(result);
+}
+
+TEST_P(CordRepBtreeTest, PrependToLeafBeyondCapacity) {
+  AutoUnref refs;
+  auto* leaf = MakeLeaf();
+  refs.RefIf(shared(), leaf);
+  CordRep* flat = MakeFlat("abc");
+  auto* result = CordRepBtree::Prepend(leaf, flat);
+  ASSERT_THAT(result, IsNode(1));
+  EXPECT_THAT(result, Ne(leaf));
+  absl::Span<CordRep* const> edges = result->Edges();
+  ASSERT_THAT(edges, ElementsAre(IsNode(0), leaf));
+  EXPECT_THAT(edges[0]->btree()->Edges(), ElementsAre(flat));
+  CordRep::Unref(result);
+}
+
+TEST_P(CordRepBtreeTest, AppendToTreeOneDeep) {
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  AutoUnref refs;
+  std::vector<CordRep*> flats;
+  flats.push_back(MakeHexFlat(0));
+  CordRepBtree* tree = CordRepBtree::Create(flats.back());
+  for (size_t i = 1; i <= max_cap; ++i) {
+    flats.push_back(MakeHexFlat(i));
+    tree = CordRepBtree::Append(tree, flats.back());
+  }
+  ASSERT_THAT(tree, IsNode(1));
+
+  for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
+    // Ref top level tree based on param.
+    // Ref leaf node once every 4 iterations, which should not have an
+    // observable effect other than that the leaf itself is copied.
+    refs.RefIf(shared(), tree);
+    refs.RefIf(i % 4 == 0, tree->Edges().back());
+
+    flats.push_back(MakeHexFlat(i));
+    CordRepBtree* result = CordRepBtree::Append(tree, flats.back());
+    ASSERT_THAT(result, IsNode(1));
+    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+    std::vector<CordRep*> edges = GetLeafEdges(result);
+    ASSERT_THAT(edges, ElementsAreArray(flats));
+    tree = result;
+  }
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeTest, AppendToTreeTwoDeep) {
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  AutoUnref refs;
+  std::vector<CordRep*> flats;
+  flats.push_back(MakeHexFlat(0));
+  CordRepBtree* tree = CordRepBtree::Create(flats.back());
+  for (size_t i = 1; i <= max_cap * max_cap; ++i) {
+    flats.push_back(MakeHexFlat(i));
+    tree = CordRepBtree::Append(tree, flats.back());
+  }
+  ASSERT_THAT(tree, IsNode(2));
+  for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
+    // Ref top level tree based on param.
+    // Ref child node once every 16 iterations, and leaf node every 4
+    // iterations, which should not have an observable effect other than
+    // the node and/or the leaf below it being copied.
+    refs.RefIf(shared(), tree);
+    refs.RefIf(i % 16 == 0, tree->Edges().back());
+    refs.RefIf(i % 4 == 0, tree->Edges().back()->btree()->Edges().back());
+
+    flats.push_back(MakeHexFlat(i));
+    CordRepBtree* result = CordRepBtree::Append(tree, flats.back());
+    ASSERT_THAT(result, IsNode(2));
+    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+    std::vector<CordRep*> edges = GetLeafEdges(result);
+    ASSERT_THAT(edges, ElementsAreArray(flats));
+    tree = result;
+  }
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeTest, PrependToTreeOneDeep) {
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  AutoUnref refs;
+  std::deque<CordRep*> flats;
+  flats.push_back(MakeHexFlat(0));
+  CordRepBtree* tree = CordRepBtree::Create(flats.back());
+  for (size_t i = 1; i <= max_cap; ++i) {
+    flats.push_front(MakeHexFlat(i));
+    tree = CordRepBtree::Prepend(tree, flats.front());
+  }
+  ASSERT_THAT(tree, IsNode(1));
+
+  for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
+    // Ref top level tree based on param.
+    // Ref leaf node once every 4 iterations, which should not have an
+    // observable effect other than that the leaf itself is copied.
+    refs.RefIf(shared(), tree);
+    refs.RefIf(i % 4 == 0, tree->Edges().back());
+
+    flats.push_front(MakeHexFlat(i));
+    CordRepBtree* result = CordRepBtree::Prepend(tree, flats.front());
+    ASSERT_THAT(result, IsNode(1));
+    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+    std::vector<CordRep*> edges = GetLeafEdges(result);
+    ASSERT_THAT(edges, ElementsAreArray(flats));
+    tree = result;
+  }
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeTest, PrependToTreeTwoDeep) {
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  AutoUnref refs;
+  std::deque<CordRep*> flats;
+  flats.push_back(MakeHexFlat(0));
+  CordRepBtree* tree = CordRepBtree::Create(flats.back());
+  for (size_t i = 1; i <= max_cap * max_cap; ++i) {
+    flats.push_front(MakeHexFlat(i));
+    tree = CordRepBtree::Prepend(tree, flats.front());
+  }
+  ASSERT_THAT(tree, IsNode(2));
+  for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
+    // Ref top level tree based on param.
+    // Ref child node once every 16 iterations, and leaf node every 4
+    // iterations, which should not have an observable effect other than
+    // the node and/or the leaf below it being copied.
+    refs.RefIf(shared(), tree);
+    refs.RefIf(i % 16 == 0, tree->Edges().back());
+    refs.RefIf(i % 4 == 0, tree->Edges().back()->btree()->Edges().back());
+
+    flats.push_front(MakeHexFlat(i));
+    CordRepBtree* result = CordRepBtree::Prepend(tree, flats.front());
+    ASSERT_THAT(result, IsNode(2));
+    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+    std::vector<CordRep*> edges = GetLeafEdges(result);
+    ASSERT_THAT(edges, ElementsAreArray(flats));
+    tree = result;
+  }
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeDualTest, MergeLeafsNotExceedingCapacity) {
+  for (bool use_append : {false, true}) {
+    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
+
+    AutoUnref refs;
+    std::vector<CordRep*> flats;
+
+    // Build `left` side leaf appending all contained flats to `flats`
+    CordRepBtree* left = MakeLeaf(3);
+    GetLeafEdges(left, flats);
+    refs.RefIf(first_shared(), left);
+
+    // Build `right` side leaf appending all contained flats to `flats`
+    CordRepBtree* right = MakeLeaf(2);
+    GetLeafEdges(right, flats);
+    refs.RefIf(second_shared(), right);
+
+    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
+                                    : CordRepBtree::Prepend(right, left);
+    EXPECT_THAT(tree, IsNode(0));
+
+    // `tree` contains all flats originally belonging to `left` and `right`.
+    EXPECT_THAT(tree->Edges(), ElementsAreArray(flats));
+    CordRepBtree::Unref(tree);
+  }
+}
+
+TEST_P(CordRepBtreeDualTest, MergeLeafsExceedingCapacity) {
+  for (bool use_append : {false, true}) {
+    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
+
+    AutoUnref refs;
+
+    // Build the `left` leaf holding kMaxCapacity - 2 entries
+    CordRepBtree* left = MakeLeaf(CordRepBtree::kMaxCapacity - 2);
+    refs.RefIf(first_shared(), left);
+
+    // Build the `right` leaf holding kMaxCapacity - 1 entries
+    CordRepBtree* right = MakeLeaf(CordRepBtree::kMaxCapacity - 1);
+    refs.RefIf(second_shared(), right);
+
+    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
+                                    : CordRepBtree::Prepend(right, left);
+    EXPECT_THAT(tree, IsNode(1));
+    EXPECT_THAT(tree->Edges(), ElementsAre(left, right));
+    CordRepBtree::Unref(tree);
+  }
+}
+
+TEST_P(CordRepBtreeDualTest, MergeEqualHeightTrees) {
+  for (bool use_append : {false, true}) {
+    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
+
+    AutoUnref refs;
+    std::vector<CordRep*> flats;
+
+    // Build `left` side tree appending all contained flats to `flats`
+    CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 3);
+    GetLeafEdges(left, flats);
+    refs.RefIf(first_shared(), left);
+
+    // Build `right` side tree appending all contained flats to `flats`
+    CordRepBtree* right = MakeTree(CordRepBtree::kMaxCapacity * 2);
+    GetLeafEdges(right, flats);
+    refs.RefIf(second_shared(), right);
+
+    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
+                                    : CordRepBtree::Prepend(right, left);
+    EXPECT_THAT(tree, IsNode(1));
+    EXPECT_THAT(tree->Edges(), SizeIs(5u));
+
+    // `tree` contains all flats originally belonging to `left` and `right`.
+    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
+    CordRepBtree::Unref(tree);
+  }
+}
+
+TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeNotExceedingLeafCapacity) {
+  for (bool use_append : {false, true}) {
+    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
+
+    AutoUnref refs;
+    std::vector<CordRep*> flats;
+
+    // Build `left` side tree appending all added flats to `flats`
+    CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 2 + 2);
+    GetLeafEdges(left, flats);
+    refs.RefIf(first_shared(), left);
+
+    // Build `right` side tree appending all added flats to `flats`
+    CordRepBtree* right = MakeTree(3);
+    GetLeafEdges(right, flats);
+    refs.RefIf(second_shared(), right);
+
+    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
+                                    : CordRepBtree::Prepend(right, left);
+    EXPECT_THAT(tree, IsNode(1));
+    EXPECT_THAT(tree->Edges(), SizeIs(3u));
+
+    // `tree` contains all flats originally belonging to `left` and `right`.
+    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
+    CordRepBtree::Unref(tree);
+  }
+}
+
+TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeExceedingLeafCapacity) {
+  for (bool use_append : {false, true}) {
+    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
+
+    AutoUnref refs;
+    std::vector<CordRep*> flats;
+
+    // Build `left` side tree appending all added flats to `flats`
+    CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 3 - 2);
+    GetLeafEdges(left, flats);
+    refs.RefIf(first_shared(), left);
+
+    // Build `right` side tree appending all added flats to `flats`
+    CordRepBtree* right = MakeTree(3);
+    GetLeafEdges(right, flats);
+    refs.RefIf(second_shared(), right);
+
+    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
+                                    : CordRepBtree::Prepend(right, left);
+    EXPECT_THAT(tree, IsNode(1));
+    EXPECT_THAT(tree->Edges(), SizeIs(4u));
+
+    // `tree` contains all flats originally belonging to `left` and `right`.
+    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
+    CordRepBtree::Unref(tree);
+  }
+}
+
+void RefEdgesAt(size_t depth, AutoUnref& refs, CordRepBtree* tree) {
+  absl::Span<CordRep* const> edges = tree->Edges();
+  if (depth == 0) {
+    refs.Ref(edges.front());
+    refs.Ref(edges.back());
+  } else {
+    assert(tree->height() > 0);
+    RefEdgesAt(depth - 1, refs, edges.front()->btree());
+    RefEdgesAt(depth - 1, refs, edges.back()->btree());
+  }
+}
+
+TEST(CordRepBtreeTest, MergeFuzzTest) {
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  std::minstd_rand rnd;
+  std::uniform_int_distribution<int> coin_flip(0, 1);
+  std::uniform_int_distribution<int> dice_throw(1, 6);
+
+  auto random_leaf_count = [&]() {
+    std::uniform_int_distribution<int> dist_height(0, 3);
+    std::uniform_int_distribution<int> dist_leaf(0, max_cap - 1);
+    const int height = dist_height(rnd);
+    return (height ? pow(max_cap, height) : 0) + dist_leaf(rnd);
+  };
+
+  for (int i = 0; i < 10000; ++i) {
+    AutoUnref refs;
+    std::vector<CordRep*> flats;
+
+    CordRepBtree* left = MakeTree(random_leaf_count(), coin_flip(rnd));
+    GetLeafEdges(left, flats);
+    if (dice_throw(rnd) == 1) {
+      std::uniform_int_distribution<size_t> dist(
+          0, static_cast<size_t>(left->height()));
+      RefEdgesAt(dist(rnd), refs, left);
+    }
+
+    CordRepBtree* right = MakeTree(random_leaf_count(), coin_flip(rnd));
+    GetLeafEdges(right, flats);
+    if (dice_throw(rnd) == 1) {
+      std::uniform_int_distribution<size_t> dist(
+          0, static_cast<size_t>(right->height()));
+      RefEdgesAt(dist(rnd), refs, right);
+    }
+
+    CordRepBtree* tree = CordRepBtree::Append(left, right);
+    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
+    CordRepBtree::Unref(tree);
+  }
+}
+
+TEST_P(CordRepBtreeTest, RemoveSuffix) {
+  // Create trees 1, 2 and 3 levels high
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  for (size_t cap : {max_cap - 1, max_cap * 2, max_cap * max_cap * 2}) {
+    const std::string data = CreateRandomString(cap * 512);
+
+    {
+      // Verify RemoveSuffix(<all>)
+      AutoUnref refs;
+      CordRepBtree* node = refs.RefIf(shared(), CreateTree(data, 512));
+      EXPECT_THAT(CordRepBtree::RemoveSuffix(node, data.length()), Eq(nullptr));
+
+      // Verify RemoveSuffix(<none>)
+      node = refs.RefIf(shared(), CreateTree(data, 512));
+      EXPECT_THAT(CordRepBtree::RemoveSuffix(node, 0), Eq(node));
+      CordRep::Unref(node);
+    }
+
+    for (size_t n = 1; n < data.length(); ++n) {
+      AutoUnref refs;
+      auto flats = CreateFlatsFromString(data, 512);
+      CordRepBtree* node = refs.RefIf(shared(), CreateTree(flats));
+      CordRep* rep = refs.Add(CordRepBtree::RemoveSuffix(node, n));
+      EXPECT_THAT(CordToString(rep), Eq(data.substr(0, data.length() - n)));
+
+      // Collect all flats
+      auto is_flat = [](CordRep* rep) { return rep->tag >= FLAT; };
+      std::vector<CordRep*> edges = CordCollectRepsIf(is_flat, rep);
+      ASSERT_THAT(edges.size(), Le(flats.size()));
+
+      // Isolate last edge
+      CordRep* last_edge = edges.back();
+      edges.pop_back();
+      const size_t last_length = rep->length - edges.size() * 512;
+
+      // All flats except the last edge must be kept or copied 'as is'
+      size_t index = 0;
+      for (CordRep* edge : edges) {
+        ASSERT_THAT(edge, Eq(flats[index++]));
+        ASSERT_THAT(edge->length, Eq(512u));
+      }
+
+      // CordRepBtree may optimize small substrings to avoid waste, so only
+      // check for flat sharing / updates where the code should always do this.
+      if (last_length >= 500) {
+        EXPECT_THAT(last_edge, Eq(flats[index++]));
+        if (shared()) {
+          EXPECT_THAT(last_edge->length, Eq(512u));
+        } else {
+          EXPECT_TRUE(last_edge->refcount.IsOne());
+          EXPECT_THAT(last_edge->length, Eq(last_length));
+        }
+      }
+    }
+  }
+}
+
+TEST(CordRepBtreeTest, SubTree) {
+  // Create a tree at least 2 levels high
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  const size_t n = max_cap * max_cap * 2;
+  const std::string data = CreateRandomString(n * 3);
+  std::vector<CordRep*> flats;
+  for (absl::string_view s = data; !s.empty(); s.remove_prefix(3)) {
+    flats.push_back(MakeFlat(s.substr(0, 3)));
+  }
+  CordRepBtree* node = CordRepBtree::Create(CordRep::Ref(flats[0]));
+  for (size_t i = 1; i < flats.size(); ++i) {
+    node = CordRepBtree::Append(node, CordRep::Ref(flats[i]));
+  }
+
+  for (size_t offset = 0; offset < data.length(); ++offset) {
+    for (size_t length = 1; length <= data.length() - offset; ++length) {
+      CordRep* rep = node->SubTree(offset, length);
+      EXPECT_THAT(CordToString(rep), Eq(data.substr(offset, length)));
+      CordRep::Unref(rep);
+    }
+  }
+  CordRepBtree::Unref(node);
+  for (CordRep* rep : flats) {
+    CordRep::Unref(rep);
+  }
+}
+
+TEST(CordRepBtreeTest, SubTreeOnExistingSubstring) {
+  // This test verifies that a SubTree call on a pre-existing (large) substring
+  // adjusts the existing substring if not shared, and else rewrites the
+  // existing substring.
+  AutoUnref refs;
+  std::string data = CreateRandomString(1000);
+  CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
+  CordRep* flat = MakeFlat(data);
+  leaf = CordRepBtree::Append(leaf, flat);
+
+  // Setup tree containing substring.
+  CordRep* result = leaf->SubTree(0, 3 + 990);
+  ASSERT_THAT(result->tag, Eq(BTREE));
+  CordRep::Unref(leaf);
+  leaf = result->btree();
+  ASSERT_THAT(leaf->Edges(), ElementsAre(_, IsSubstring(0u, 990u)));
+  EXPECT_THAT(leaf->Edges()[1]->substring()->child, Eq(flat));
+
+  // Verify substring of substring.
+  result = leaf->SubTree(3 + 5, 970);
+  ASSERT_THAT(result, IsSubstring(5u, 970u));
+  EXPECT_THAT(result->substring()->child, Eq(flat));
+  CordRep::Unref(result);
+
+  CordRep::Unref(leaf);
+}
+
+TEST_P(CordRepBtreeTest, AddDataToLeaf) {
+  const size_t n = CordRepBtree::kMaxCapacity;
+  const std::string data = CreateRandomString(n * 3);
+
+  for (bool append : {true, false}) {
+    AutoUnref refs;
+    DataConsumer consumer(data, append);
+    SCOPED_TRACE(append ? "Append" : "Prepend");
+
+    CordRepBtree* leaf = CordRepBtree::Create(MakeFlat(consumer.Next(3)));
+    for (size_t i = 1; i < n; ++i) {
+      refs.RefIf(shared(), leaf);
+      CordRepBtree* result = BtreeAdd(leaf, append, consumer.Next(3));
+      EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
+      EXPECT_THAT(CordToString(result), Eq(consumer.Consumed()));
+      leaf = result;
+    }
+    CordRep::Unref(leaf);
+  }
+}
+
+TEST_P(CordRepBtreeTest, AppendDataToTree) {
+  AutoUnref refs;
+  size_t n = CordRepBtree::kMaxCapacity + CordRepBtree::kMaxCapacity / 2;
+  std::string data = CreateRandomString(n * 3);
+  CordRepBtree* tree = refs.RefIf(shared(), CreateTree(data, 3));
+  CordRepBtree* leaf0 = tree->Edges()[0]->btree();
+  CordRepBtree* leaf1 = tree->Edges()[1]->btree();
+  CordRepBtree* result = CordRepBtree::Append(tree, "123456789");
+  EXPECT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+  EXPECT_THAT(result->Edges(),
+              ElementsAre(leaf0, Conditional(shared(), Ne(leaf1), Eq(leaf1))));
+  EXPECT_THAT(CordToString(result), Eq(data + "123456789"));
+  CordRep::Unref(result);
+}
+
+TEST_P(CordRepBtreeTest, PrependDataToTree) {
+  AutoUnref refs;
+  size_t n = CordRepBtree::kMaxCapacity + CordRepBtree::kMaxCapacity / 2;
+  std::string data = CreateRandomString(n * 3);
+  CordRepBtree* tree = refs.RefIf(shared(), CreateTreeReverse(data, 3));
+  CordRepBtree* leaf0 = tree->Edges()[0]->btree();
+  CordRepBtree* leaf1 = tree->Edges()[1]->btree();
+  CordRepBtree* result = CordRepBtree::Prepend(tree, "123456789");
+  EXPECT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+  EXPECT_THAT(result->Edges(),
+              ElementsAre(Conditional(shared(), Ne(leaf0), Eq(leaf0)), leaf1));
+  EXPECT_THAT(CordToString(result), Eq("123456789" + data));
+  CordRep::Unref(result);
+}
+
+TEST_P(CordRepBtreeTest, AddDataToTreeThreeLevelsDeep) {
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  const size_t n = max_cap * max_cap * max_cap;
+  const std::string data = CreateRandomString(n * 3);
+
+  for (bool append : {true, false}) {
+    AutoUnref refs;
+    DataConsumer consumer(data, append);
+    SCOPED_TRACE(append ? "Append" : "Prepend");
+
+    // Fill leaf
+    CordRepBtree* tree = CordRepBtree::Create(MakeFlat(consumer.Next(3)));
+    for (size_t i = 1; i < max_cap; ++i) {
+      tree = BtreeAdd(tree, append, consumer.Next(3));
+    }
+    ASSERT_THAT(CordToString(tree), Eq(consumer.Consumed()));
+
+    // Fill to maximum at one deep
+    refs.RefIf(shared(), tree);
+    CordRepBtree* result = BtreeAdd(tree, append, consumer.Next(3));
+    ASSERT_THAT(result, IsNode(1));
+    ASSERT_THAT(result, Ne(tree));
+    ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
+    tree = result;
+    for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
+      refs.RefIf(shared(), tree);
+      result = BtreeAdd(tree, append, consumer.Next(3));
+      ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+      ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
+      tree = result;
+    }
+
+    // Fill to maximum at two deep
+    refs.RefIf(shared(), tree);
+    result = BtreeAdd(tree, append, consumer.Next(3));
+    ASSERT_THAT(result, IsNode(2));
+    ASSERT_THAT(result, Ne(tree));
+    ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
+    tree = result;
+    for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap;
+         ++i) {
+      refs.RefIf(shared(), tree);
+      result = BtreeAdd(tree, append, consumer.Next(3));
+      ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
+      ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
+      tree = result;
+    }
+
+    CordRep::Unref(tree);
+  }
+}
+
+TEST_P(CordRepBtreeTest, AddLargeDataToLeaf) {
+  const size_t max_cap = CordRepBtree::kMaxCapacity;
+  const size_t n = max_cap * max_cap * max_cap * 3 + 2;
+  const std::string data = CreateRandomString(n * kMaxFlatLength);
+
+  for (bool append : {true, false}) {
+    AutoUnref refs;
+    SCOPED_TRACE(append ? "Append" : "Prepend");
+
+    CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
+    refs.RefIf(shared(), leaf);
+    CordRepBtree* result = BtreeAdd(leaf, append, data);
+    EXPECT_THAT(CordToString(result), Eq(append ? "abc" + data : data + "abc"));
+    CordRep::Unref(result);
+  }
+}
+
+TEST_P(CordRepBtreeTest, CreateFromTreeReturnsTree) {
+  AutoUnref refs;
+  CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("Hello world"));
+  refs.RefIf(shared(), leaf);
+  CordRepBtree* result = CordRepBtree::Create(leaf);
+  EXPECT_THAT(result, Eq(leaf));
+  CordRep::Unref(result);
+}
+
+TEST(CordRepBtreeTest, GetCharacter) {
+  size_t n = CordRepBtree::kMaxCapacity * CordRepBtree::kMaxCapacity + 2;
+  std::string data = CreateRandomString(n * 3);
+  CordRepBtree* tree = CreateTree(data, 3);
+  // Add a substring node for good measure.
+  tree = tree->Append(tree, MakeSubstring(4, 5, MakeFlat("abcdefghijklm")));
+  data += "efghi";
+  for (size_t i = 0; i < data.length(); ++i) {
+    ASSERT_THAT(tree->GetCharacter(i), Eq(data[i]));
+  }
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeTest, IsFlatSingleFlat) {
+  CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("Hello world"));
+
+  absl::string_view fragment;
+  EXPECT_TRUE(leaf->IsFlat(nullptr));
+  EXPECT_TRUE(leaf->IsFlat(&fragment));
+  EXPECT_THAT(fragment, Eq("Hello world"));
+  fragment = "";
+  EXPECT_TRUE(leaf->IsFlat(0, 11, nullptr));
+  EXPECT_TRUE(leaf->IsFlat(0, 11, &fragment));
+  EXPECT_THAT(fragment, Eq("Hello world"));
+
+  // Arbitrary sub-ranges must also report true.
+  EXPECT_TRUE(leaf->IsFlat(1, 4, &fragment));
+  EXPECT_THAT(fragment, Eq("ello"));
+  EXPECT_TRUE(leaf->IsFlat(6, 5, &fragment));
+  EXPECT_THAT(fragment, Eq("world"));
+
+  CordRep::Unref(leaf);
+}
+
+TEST(CordRepBtreeTest, IsFlatMultiFlat) {
+  size_t n = CordRepBtree::kMaxCapacity * CordRepBtree::kMaxCapacity + 2;
+  std::string data = CreateRandomString(n * 3);
+  CordRepBtree* tree = CreateTree(data, 3);
+  // Add substring nodes for good measure.
+  tree = tree->Append(tree, MakeSubstring(4, 3, MakeFlat("abcdefghijklm")));
+  tree = tree->Append(tree, MakeSubstring(8, 3, MakeFlat("abcdefghijklm")));
+  data += "efgijk";
+
+  EXPECT_FALSE(tree->IsFlat(nullptr));
+  absl::string_view fragment = "Can't touch this";
+  EXPECT_FALSE(tree->IsFlat(&fragment));
+  EXPECT_THAT(fragment, Eq("Can't touch this"));
+
+  for (size_t offset = 0; offset < data.size(); offset += 3) {
+    EXPECT_TRUE(tree->IsFlat(offset, 3, nullptr));
+    EXPECT_TRUE(tree->IsFlat(offset, 3, &fragment));
+    EXPECT_THAT(fragment, Eq(data.substr(offset, 3)));
+
+    fragment = "Can't touch this";
+    if (offset > 0) {
+      EXPECT_FALSE(tree->IsFlat(offset - 1, 4, nullptr));
+      EXPECT_FALSE(tree->IsFlat(offset - 1, 4, &fragment));
+      EXPECT_THAT(fragment, Eq("Can't touch this"));
+    }
+    if (offset < data.size() - 4) {
+      EXPECT_FALSE(tree->IsFlat(offset, 4, nullptr));
+      EXPECT_FALSE(tree->IsFlat(offset, 4, &fragment));
+      EXPECT_THAT(fragment, Eq("Can't touch this"));
+    }
+  }
+
+  CordRep::Unref(tree);
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+
+TEST_P(CordRepBtreeHeightTest, GetAppendBufferNotPrivate) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeExternal("Foo"));
+  CordRepBtree::Ref(tree);
+  EXPECT_DEATH(tree->GetAppendBuffer(1), ".*");
+  CordRepBtree::Unref(tree);
+  CordRepBtree::Unref(tree);
+}
+
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+
+TEST_P(CordRepBtreeHeightTest, GetAppendBufferNotFlat) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeExternal("Foo"));
+  for (int i = 1; i <= height(); ++i) {
+    tree = CordRepBtree::New(tree);
+  }
+  EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
+  CordRepBtree::Unref(tree);
+}
+
+TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatNotPrivate) {
+  CordRepFlat* flat = MakeFlat("abc");
+  CordRepBtree* tree = CordRepBtree::Create(CordRep::Ref(flat));
+  for (int i = 1; i <= height(); ++i) {
+    tree = CordRepBtree::New(tree);
+  }
+  EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
+  CordRepBtree::Unref(tree);
+  CordRep::Unref(flat);
+}
+
+TEST_P(CordRepBtreeHeightTest, GetAppendBufferTreeNotPrivate) {
+  if (height() == 0) return;
+  AutoUnref refs;
+  CordRepFlat* flat = MakeFlat("abc");
+  CordRepBtree* tree = CordRepBtree::Create(CordRep::Ref(flat));
+  for (int i = 1; i <= height(); ++i) {
+    if (i == (height() + 1) / 2) refs.Ref(tree);
+    tree = CordRepBtree::New(tree);
+  }
+  EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
+  CordRepBtree::Unref(tree);
+  CordRep::Unref(flat);
+}
+
+TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatNoCapacity) {
+  CordRepFlat* flat = MakeFlat("abc");
+  flat->length = flat->Capacity();
+  CordRepBtree* tree = CordRepBtree::Create(flat);
+  for (int i = 1; i <= height(); ++i) {
+    tree = CordRepBtree::New(tree);
+  }
+  EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
+  CordRepBtree::Unref(tree);
+}
+
+TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatWithCapacity) {
+  CordRepFlat* flat = MakeFlat("abc");
+  CordRepBtree* tree = CordRepBtree::Create(flat);
+  for (int i = 1; i <= height(); ++i) {
+    tree = CordRepBtree::New(tree);
+  }
+  absl::Span<char> span = tree->GetAppendBuffer(2);
+  EXPECT_THAT(span, SizeIs(2u));
+  EXPECT_THAT(span.data(), TypedEq<void*>(flat->Data() + 3));
+  EXPECT_THAT(tree->length, Eq(5u));
+
+  size_t avail = flat->Capacity() - 5;
+  span = tree->GetAppendBuffer(avail + 100);
+  EXPECT_THAT(span, SizeIs(avail));
+  EXPECT_THAT(span.data(), TypedEq<void*>(flat->Data() + 5));
+  EXPECT_THAT(tree->length, Eq(5 + avail));
+
+  CordRepBtree::Unref(tree);
+}
+
+TEST(CordRepBtreeTest, Dump) {
+  // Handles nullptr
+  std::stringstream ss;
+  CordRepBtree::Dump(nullptr, ss);
+  CordRepBtree::Dump(nullptr, "Once upon a label", ss);
+  CordRepBtree::Dump(nullptr, "Once upon a label", false, ss);
+  CordRepBtree::Dump(nullptr, "Once upon a label", true, ss);
+
+  // Cover legal edges
+  CordRepFlat* flat = MakeFlat("Hello world");
+  CordRepExternal* external = MakeExternal("Hello external");
+  CordRep* substr_flat = MakeSubstring(1, 6, CordRep::Ref(flat));
+  CordRep* substr_external = MakeSubstring(2, 7, CordRep::Ref(external));
+
+  // Build tree
+  CordRepBtree* tree = CordRepBtree::Create(flat);
+  tree = CordRepBtree::Append(tree, external);
+  tree = CordRepBtree::Append(tree, substr_flat);
+  tree = CordRepBtree::Append(tree, substr_external);
+
+  // Repeat until we have a tree
+  while (tree->height() == 0) {
+    tree = CordRepBtree::Append(tree, CordRep::Ref(flat));
+    tree = CordRepBtree::Append(tree, CordRep::Ref(external));
+    tree = CordRepBtree::Append(tree, CordRep::Ref(substr_flat));
+    tree = CordRepBtree::Append(tree, CordRep::Ref(substr_external));
+  }
+
+  for (int api = 0; api <= 3; ++api) {
+    absl::string_view api_scope;
+    std::stringstream ss;
+    switch (api) {
+      case 0:
+        api_scope = "Bare";
+        CordRepBtree::Dump(tree, ss);
+        break;
+      case 1:
+        api_scope = "Label only";
+        CordRepBtree::Dump(tree, "Once upon a label", ss);
+        break;
+      case 2:
+        api_scope = "Label no content";
+        CordRepBtree::Dump(tree, "Once upon a label", false, ss);
+        break;
+      default:
+        api_scope = "Label and content";
+        CordRepBtree::Dump(tree, "Once upon a label", true, ss);
+        break;
+    }
+    SCOPED_TRACE(api_scope);
+    std::string str = ss.str();
+
+    // Contains Node(depth) / Leaf and private / shared indicators
+    EXPECT_THAT(str, AllOf(HasSubstr("Node(1)"), HasSubstr("Leaf"),
+                           HasSubstr("Private"), HasSubstr("Shared")));
+
+    // Contains length and start offset of all data edges
+    EXPECT_THAT(str, AllOf(HasSubstr("len = 11"), HasSubstr("len = 14"),
+                           HasSubstr("len = 6"), HasSubstr("len = 7"),
+                           HasSubstr("start = 1"), HasSubstr("start = 2")));
+
+    // Contains address of all data edges
+    EXPECT_THAT(
+        str, AllOf(HasSubstr(absl::StrCat("0x", absl::Hex(flat))),
+                   HasSubstr(absl::StrCat("0x", absl::Hex(external))),
+                   HasSubstr(absl::StrCat("0x", absl::Hex(substr_flat))),
+                   HasSubstr(absl::StrCat("0x", absl::Hex(substr_external)))));
+
+    if (api != 0) {
+      // Contains label
+      EXPECT_THAT(str, HasSubstr("Once upon a label"));
+    }
+
+    if (api != 3) {
+      // Does not contain contents
+      EXPECT_THAT(str, Not(AnyOf((HasSubstr("data = \"Hello world\""),
+                                  HasSubstr("data = \"Hello external\""),
+                                  HasSubstr("data = \"ello w\""),
+                                  HasSubstr("data = \"llo ext\"")))));
+    } else {
+      // Contains contents
+      EXPECT_THAT(str, AllOf((HasSubstr("data = \"Hello world\""),
+                              HasSubstr("data = \"Hello external\""),
+                              HasSubstr("data = \"ello w\""),
+                              HasSubstr("data = \"llo ext\""))));
+    }
+  }
+
+  CordRep::Unref(tree);
+}
+
+TEST(CordRepBtreeTest, IsValid) {
+  EXPECT_FALSE(CordRepBtree::IsValid(nullptr));
+
+  CordRepBtree* empty = CordRepBtree::New(0);
+  EXPECT_TRUE(CordRepBtree::IsValid(empty));
+  CordRep::Unref(empty);
+
+  for (bool as_tree : {false, true}) {
+    CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
+    CordRepBtree* tree = as_tree ? CordRepBtree::New(leaf) : nullptr;
+    CordRepBtree* check = as_tree ? tree : leaf;
+
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    leaf->length--;
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->length++;
+
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    leaf->tag--;
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->tag++;
+
+    // Height
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    leaf->storage[0] = static_cast<uint8_t>(CordRepBtree::kMaxHeight + 1);
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->storage[0] = 1;
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->storage[0] = 0;
+
+    // Begin
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    const uint8_t begin = leaf->storage[1];
+    leaf->storage[1] = static_cast<uint8_t>(CordRepBtree::kMaxCapacity);
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->storage[1] = 2;
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->storage[1] = begin;
+
+    // End
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    const uint8_t end = leaf->storage[2];
+    leaf->storage[2] = static_cast<uint8_t>(CordRepBtree::kMaxCapacity + 1);
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    leaf->storage[2] = end;
+
+    // DataEdge tag and value
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    CordRep* const edge = leaf->Edges()[0];
+    const uint8_t tag = edge->tag;
+    CordRepBtreeTestPeer::SetEdge(leaf, begin, nullptr);
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    CordRepBtreeTestPeer::SetEdge(leaf, begin, edge);
+    edge->tag = BTREE;
+    EXPECT_FALSE(CordRepBtree::IsValid(check));
+    edge->tag = tag;
+
+    if (as_tree) {
+      ASSERT_TRUE(CordRepBtree::IsValid(check));
+      leaf->length--;
+      EXPECT_FALSE(CordRepBtree::IsValid(check));
+      leaf->length++;
+
+      // Height
+      ASSERT_TRUE(CordRepBtree::IsValid(check));
+      tree->storage[0] = static_cast<uint8_t>(2);
+      EXPECT_FALSE(CordRepBtree::IsValid(check));
+      tree->storage[0] = 1;
+
+      // Btree edge
+      ASSERT_TRUE(CordRepBtree::IsValid(check));
+      CordRep* const edge = tree->Edges()[0];
+      const uint8_t tag = edge->tag;
+      edge->tag = FLAT;
+      EXPECT_FALSE(CordRepBtree::IsValid(check));
+      edge->tag = tag;
+    }
+
+    ASSERT_TRUE(CordRepBtree::IsValid(check));
+    CordRep::Unref(check);
+  }
+}
+
+TEST(CordRepBtreeTest, AssertValid) {
+  CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
+  const CordRepBtree* ctree = tree;
+  EXPECT_THAT(CordRepBtree::AssertValid(tree), Eq(tree));
+  EXPECT_THAT(CordRepBtree::AssertValid(ctree), Eq(ctree));
+
+#if defined(GTEST_HAS_DEATH_TEST)
+  CordRepBtree* nulltree = nullptr;
+  const CordRepBtree* cnulltree = nullptr;
+  EXPECT_DEBUG_DEATH(
+      EXPECT_THAT(CordRepBtree::AssertValid(nulltree), Eq(nulltree)), ".*");
+  EXPECT_DEBUG_DEATH(
+      EXPECT_THAT(CordRepBtree::AssertValid(cnulltree), Eq(cnulltree)), ".*");
+
+  tree->length--;
+  EXPECT_DEBUG_DEATH(EXPECT_THAT(CordRepBtree::AssertValid(tree), Eq(tree)),
+                     ".*");
+  EXPECT_DEBUG_DEATH(EXPECT_THAT(CordRepBtree::AssertValid(ctree), Eq(ctree)),
+                     ".*");
+  tree->length++;
+#endif
+  CordRep::Unref(tree);
+}
+
+TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
+  // Restore exhaustive validation on any exit.
+  const bool exhaustive_validation = IsCordBtreeExhaustiveValidationEnabled();
+  auto cleanup = absl::MakeCleanup([exhaustive_validation] {
+    SetCordBtreeExhaustiveValidation(exhaustive_validation);
+  });
+
+  // Create a tree of at least 2 levels, and mess with the original flat, which
+  // should go undetected in shallow mode as the flat is too far away, but
+  // should be detected in forced non-shallow mode.
+  CordRep* flat = MakeFlat("abc");
+  CordRepBtree* tree = CordRepBtree::Create(flat);
+  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
+  const size_t n = max_cap * max_cap * 2;
+  for (size_t i = 0; i < n; ++i) {
+    tree = CordRepBtree::Append(tree, MakeFlat("Hello world"));
+  }
+  flat->length = 100;
+
+  SetCordBtreeExhaustiveValidation(false);
+  EXPECT_FALSE(CordRepBtree::IsValid(tree));
+  EXPECT_TRUE(CordRepBtree::IsValid(tree, true));
+  EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
+  CordRepBtree::AssertValid(tree);
+  CordRepBtree::AssertValid(tree, true);
+#if defined(GTEST_HAS_DEATH_TEST)
+  EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, false), ".*");
+#endif
+
+  SetCordBtreeExhaustiveValidation(true);
+  EXPECT_FALSE(CordRepBtree::IsValid(tree));
+  EXPECT_FALSE(CordRepBtree::IsValid(tree, true));
+  EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
+#if defined(GTEST_HAS_DEATH_TEST)
+  EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree), ".*");
+  EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, true), ".*");
+#endif
+
+  flat->length = 3;
+  CordRep::Unref(tree);
+}
+
+TEST_P(CordRepBtreeTest, Rebuild) {
+  for (size_t size : {3u, 8u, 100u, 10000u, 1000000u}) {
+    SCOPED_TRACE(absl::StrCat("Rebuild @", size));
+
+    std::vector<CordRepFlat*> flats;
+    for (size_t i = 0; i < size; ++i) {
+      flats.push_back(CordRepFlat::New(2));
+      flats.back()->Data()[0] = 'x';
+      flats.back()->length = 1;
+    }
+
+    // Build the tree into 'right'; after every 'split_limit' edges, combine
+    // 'left' + 'right' into a new 'left' and start a new 'right'. This
+    // guarantees we get a reasonable amount of chaos in the tree.
+    size_t split_count = 0;
+    size_t split_limit = 3;
+    auto it = flats.begin();
+    CordRepBtree* left = nullptr;
+    CordRepBtree* right = CordRepBtree::New(*it);
+    while (++it != flats.end()) {
+      if (++split_count >= split_limit) {
+        split_limit += split_limit / 16;
+        left = left ? CordRepBtree::Append(left, right) : right;
+        right = CordRepBtree::New(*it);
+      } else {
+        right = CordRepBtree::Append(right, *it);
+      }
+    }
+
+    // Finalize tree
+    left = left ? CordRepBtree::Append(left, right) : right;
+
+    // Rebuild
+    AutoUnref ref;
+    left = ref.Add(CordRepBtree::Rebuild(ref.RefIf(shared(), left)));
+    ASSERT_TRUE(CordRepBtree::IsValid(left));
+
+    // Verify we have the exact same edges in the exact same order.
+    bool ok = true;
+    it = flats.begin();
+    CordVisitReps(left, [&](CordRep* edge) {
+      if (edge->tag < FLAT) return;
+      ok = ok && (it != flats.end() && *it++ == edge);
+    });
+    EXPECT_TRUE(ok && it == flats.end()) << "Rebuild edges mismatch";
+  }
+}
+
+// Convenience helper for CordRepBtree::ExtractAppendBuffer
+CordRepBtree::ExtractResult ExtractLast(CordRepBtree* input, size_t cap = 1) {
+  return CordRepBtree::ExtractAppendBuffer(input, cap);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferLeafSingleFlat) {
+  CordRep* flat = MakeFlat("Abc");
+  CordRepBtree* leaf = CordRepBtree::Create(flat);
+  EXPECT_THAT(ExtractLast(leaf), EqExtractResult(nullptr, flat));
+  CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeSingleFlat) {
+  CordRep* flat = MakeFlat("Abc");
+  CordRepBtree* leaf = CordRepBtree::Create(flat);
+  CordRepBtree* node = CordRepBtree::New(leaf);
+  EXPECT_THAT(ExtractLast(node), EqExtractResult(nullptr, flat));
+  CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferLeafTwoFlats) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+  CordRepBtree* leaf = CreateTree(flats);
+  EXPECT_THAT(ExtractLast(leaf), EqExtractResult(flats[0], flats[1]));
+  CordRep::Unref(flats[0]);
+  CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlats) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+  CordRepBtree* leaf = CreateTree(flats);
+  CordRepBtree* node = CordRepBtree::New(leaf);
+  EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1]));
+  CordRep::Unref(flats[0]);
+  CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlatsInTwoLeafs) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+  CordRepBtree* leaf1 = CordRepBtree::Create(flats[0]);
+  CordRepBtree* leaf2 = CordRepBtree::Create(flats[1]);
+  CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
+  EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1]));
+  CordRep::Unref(flats[0]);
+  CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferLeafThreeFlats) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdefghi", 3);
+  CordRepBtree* leaf = CreateTree(flats);
+  EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, flats[2]));
+  CordRep::Unref(flats[2]);
+  CordRep::Unref(leaf);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightNoFolding) {
+  CordRep* flat = MakeFlat("Abc");
+  std::vector<CordRep*> flats = CreateFlatsFromString("defghi", 3);
+  CordRepBtree* leaf1 = CordRepBtree::Create(flat);
+  CordRepBtree* leaf2 = CreateTree(flats);
+  CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
+  EXPECT_THAT(ExtractLast(node), EqExtractResult(node, flats[1]));
+  EXPECT_THAT(node->Edges(), ElementsAre(leaf1, leaf2));
+  EXPECT_THAT(leaf1->Edges(), ElementsAre(flat));
+  EXPECT_THAT(leaf2->Edges(), ElementsAre(flats[0]));
+  CordRep::Unref(node);
+  CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightLeafFolding) {
+  CordRep* flat = MakeFlat("Abc");
+  std::vector<CordRep*> flats = CreateFlatsFromString("defghi", 3);
+  CordRepBtree* leaf1 = CreateTree(flats);
+  CordRepBtree* leaf2 = CordRepBtree::Create(flat);
+  CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
+  EXPECT_THAT(ExtractLast(node), EqExtractResult(leaf1, flat));
+  EXPECT_THAT(leaf1->Edges(), ElementsAreArray(flats));
+  CordRep::Unref(leaf1);
+  CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNoCapacity) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+  CordRepBtree* leaf = CreateTree(flats);
+  size_t avail = flats[1]->flat()->Capacity() - flats[1]->length;
+  EXPECT_THAT(ExtractLast(leaf, avail + 1), EqExtractResult(leaf, nullptr));
+  EXPECT_THAT(ExtractLast(leaf, avail), EqExtractResult(flats[0], flats[1]));
+  CordRep::Unref(flats[0]);
+  CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNotFlat) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+  auto substr = MakeSubstring(1, 2, flats[1]);
+  CordRepBtree* leaf = CreateTree({flats[0], substr});
+  EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
+  CordRep::Unref(leaf);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferShared) {
+  std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+  CordRepBtree* leaf = CreateTree(flats);
+
+  CordRep::Ref(flats[1]);
+  EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
+  CordRep::Unref(flats[1]);
+
+  CordRep::Ref(leaf);
+  EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
+  CordRep::Unref(leaf);
+
+  CordRepBtree* node = CordRepBtree::New(leaf);
+  CordRep::Ref(node);
+  EXPECT_THAT(ExtractLast(node), EqExtractResult(node, nullptr));
+  CordRep::Unref(node);
+
+  CordRep::Unref(node);
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
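A minimal sketch of the copy-on-write contract the parameterized tests above keep asserting, assuming the reference conventions those tests rely on (Append consumes the caller's reference to its input tree and returns an owned result); `MakeFlat` is the test helper defined earlier in this file and `<cassert>` is assumed to be available:

// Illustrative sketch only; mirrors what AppendToLeafToCapacity asserts.
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("a"));

// Privately owned and below capacity: Append may mutate in place and
// return the same node.
CordRepBtree* same = CordRepBtree::Append(leaf, MakeFlat("b"));
assert(same == leaf);

// Shared: an extra reference forces Append to copy rather than mutate.
CordRep::Ref(same);
CordRepBtree* copy = CordRepBtree::Append(same, MakeFlat("c"));
assert(copy != same);

CordRep::Unref(same);  // release the extra reference on the original
CordRep::Unref(copy);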
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_consume.cc b/abseil-cpp/absl/strings/internal/cord_rep_consume.cc
new file mode 100644
index 0000000..db7d4fe
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_consume.cc
@@ -0,0 +1,64 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_consume.h"
+
+#include <array>
+#include <utility>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/functional/function_ref.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+// Unrefs the provided `substring` and returns `substring->child`.
+// Adds or assumes a reference on `substring->child`.
+CordRep* ClipSubstring(CordRepSubstring* substring) {
+  CordRep* child = substring->child;
+  if (substring->refcount.IsOne()) {
+    delete substring;
+  } else {
+    CordRep::Ref(child);
+    CordRep::Unref(substring);
+  }
+  return child;
+}
+
+}  // namespace
+
+void Consume(CordRep* rep,
+             FunctionRef<void(CordRep*, size_t, size_t)> consume_fn) {
+  size_t offset = 0;
+  size_t length = rep->length;
+
+  if (rep->tag == SUBSTRING) {
+    offset += rep->substring()->start;
+    rep = ClipSubstring(rep->substring());
+  }
+  consume_fn(rep, offset, length);
+}
+
+void ReverseConsume(CordRep* rep,
+                    FunctionRef<void(CordRep*, size_t, size_t)> consume_fn) {
+  return Consume(rep, consume_fn);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_consume.h b/abseil-cpp/absl/strings/internal/cord_rep_consume.h
new file mode 100644
index 0000000..bece187
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_consume.h
@@ -0,0 +1,47 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_
+
+#include <functional>
+
+#include "absl/functional/function_ref.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Consume() and ReverseConsume() consume CONCAT based trees and invoke the
+// provided functor with the contained nodes in the proper forward or reverse
+// order, which is used to convert CONCAT trees into other tree or cord data.
+// All CONCAT and SUBSTRING nodes are processed internally. The `offset`
+// parameter of the functor is non-zero for any nodes below SUBSTRING nodes.
+// It's up to the caller to form these back into SUBSTRING nodes or otherwise
+// store offset / prefix information. These functions are intended to be used
+// only for migration / transitional code where due to factors such as ODR
+// violations, we cannot 100% guarantee that all code respects 'new format'
+// settings and flags, so we need to be able to parse old data on the fly until
+// all old code is deprecated / no longer the default format.
+void Consume(CordRep* rep,
+             FunctionRef<void(CordRep*, size_t, size_t)> consume_fn);
+void ReverseConsume(CordRep* rep,
+                    FunctionRef<void(CordRep*, size_t, size_t)> consume_fn);
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_
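The header above describes the traversal contract only in prose, so a short usage sketch may help. The `Piece` struct and `CollectPieces` wrapper are illustrative names, not part of this patch, and the assumption that the functor takes over the reference of each reported node follows from the "consume" semantics documented above:

// Illustrative sketch: gathers the (node, offset, length) triples that
// Consume() reports for a tree.
#include <cstddef>
#include <vector>

#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_consume.h"

namespace {

struct Piece {
  absl::cord_internal::CordRep* rep;
  size_t offset;
  size_t length;
};

std::vector<Piece> CollectPieces(absl::cord_internal::CordRep* rep) {
  std::vector<Piece> pieces;
  absl::cord_internal::Consume(
      rep, [&pieces](absl::cord_internal::CordRep* node, size_t offset,
                     size_t length) {
        // The node arrives with its reference; keep it alive for the caller.
        pieces.push_back({node, offset, length});
      });
  return pieces;  // the caller is expected to unref each collected node
}

}  // namespace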
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_crc.cc b/abseil-cpp/absl/strings/internal/cord_rep_crc.cc
new file mode 100644
index 0000000..dbe54cc
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_crc.cc
@@ -0,0 +1,56 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_crc.h"
+
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+CordRepCrc* CordRepCrc::New(CordRep* child, crc_internal::CrcCordState state) {
+  if (child != nullptr && child->IsCrc()) {
+    if (child->refcount.IsOne()) {
+      child->crc()->crc_cord_state = std::move(state);
+      return child->crc();
+    }
+    CordRep* old = child;
+    child = old->crc()->child;
+    CordRep::Ref(child);
+    CordRep::Unref(old);
+  }
+  auto* new_cordrep = new CordRepCrc;
+  new_cordrep->length = child != nullptr ? child->length : 0;
+  new_cordrep->tag = cord_internal::CRC;
+  new_cordrep->child = child;
+  new_cordrep->crc_cord_state = std::move(state);
+  return new_cordrep;
+}
+
+void CordRepCrc::Destroy(CordRepCrc* node) {
+  if (node->child != nullptr) {
+    CordRep::Unref(node->child);
+  }
+  delete node;
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_crc.h b/abseil-cpp/absl/strings/internal/cord_rep_crc.h
new file mode 100644
index 0000000..379d7a6
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_crc.h
@@ -0,0 +1,103 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
+
+#include <cassert>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/crc/internal/crc_cord_state.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepCrc is a CordRep node intended only to appear at the top level of a
+// cord tree.  It associates an "expected CRC" with the contained data, to allow
+// for easy passage of checksum data in Cord data flows.
+//
+// From Cord's perspective, the crc value has no semantics; any validation of
+// the contained checksum is the user's responsibility.
+struct CordRepCrc : public CordRep {
+  CordRep* child;
+  absl::crc_internal::CrcCordState crc_cord_state;
+
+  // Consumes `child` and returns a CordRepCrc prefixed tree containing `child`.
+  // If the specified `child` is itself a CordRepCrc node, then this method
+  // either replaces the existing node, or directly updates the crc state in it
+  // depending on the node being shared or not, i.e.: refcount.IsOne().
+  // `child` must only be null if the Cord is empty. Never returns null.
+  static CordRepCrc* New(CordRep* child, crc_internal::CrcCordState state);
+
+  // Destroys (deletes) the provided node. `node` must not be null.
+  static void Destroy(CordRepCrc* node);
+};
+
+// Consumes `rep` and returns a CordRep* with any outer CordRepCrc wrapper
+// removed. This is usually a no-op (returning `rep`), but when `rep` is a
+// CordRepCrc node, the wrapper is removed and unreffed and its child returned.
+inline CordRep* RemoveCrcNode(CordRep* rep) {
+  assert(rep != nullptr);
+  if (ABSL_PREDICT_FALSE(rep->IsCrc())) {
+    CordRep* child = rep->crc()->child;
+    if (rep->refcount.IsOne()) {
+      delete rep->crc();
+    } else {
+      CordRep::Ref(child);
+      CordRep::Unref(rep);
+    }
+    return child;
+  }
+  return rep;
+}
+
+// Returns `rep` if it is not a CordRepCrc node, or its child if it is.
+// Does not consume or create a reference on `rep` or the returned value.
+inline CordRep* SkipCrcNode(CordRep* rep) {
+  assert(rep != nullptr);
+  if (ABSL_PREDICT_FALSE(rep->IsCrc())) {
+    return rep->crc()->child;
+  } else {
+    return rep;
+  }
+}
+
+inline const CordRep* SkipCrcNode(const CordRep* rep) {
+  assert(rep != nullptr);
+  if (ABSL_PREDICT_FALSE(rep->IsCrc())) {
+    return rep->crc()->child;
+  } else {
+    return rep;
+  }
+}
+
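+// Editorial note contrasting the two helpers above: SkipCrcNode() merely
+// borrows (no reference is consumed or created), whereas RemoveCrcNode()
+// consumes `rep` and returns the child with a reference the caller then owns:
+//
+//   const CordRep* peeked = SkipCrcNode(rep);  // borrow only; `rep` still owned
+//   CordRep* child = RemoveCrcNode(rep);       // consumes `rep`
+//   CordRep::Unref(child);                     // caller releases the child
+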
+inline CordRepCrc* CordRep::crc() {
+  assert(IsCrc());
+  return static_cast<CordRepCrc*>(this);
+}
+
+inline const CordRepCrc* CordRep::crc() const {
+  assert(IsCrc());
+  return static_cast<const CordRepCrc*>(this);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc b/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc
new file mode 100644
index 0000000..3d27c33
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc
@@ -0,0 +1,130 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_crc.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/crc/internal/crc_cord_state.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::absl::cordrep_testing::MakeFlat;
+using ::testing::Eq;
+using ::testing::IsNull;
+using ::testing::Ne;
+
+#if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST
+
+TEST(CordRepCrc, RemoveCrcWithNullptr) {
+  EXPECT_DEATH(RemoveCrcNode(nullptr), "");
+}
+
+#endif  // !NDEBUG && GTEST_HAS_DEATH_TEST
+
+absl::crc_internal::CrcCordState MakeCrcCordState(uint32_t crc) {
+  crc_internal::CrcCordState state;
+  state.mutable_rep()->prefix_crc.push_back(
+      crc_internal::CrcCordState::PrefixCrc(42, crc32c_t{crc}));
+  return state;
+}
+
+TEST(CordRepCrc, NewDestroy) {
+  CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+  CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
+  EXPECT_TRUE(crc->refcount.IsOne());
+  EXPECT_THAT(crc->child, Eq(rep));
+  EXPECT_THAT(crc->crc_cord_state.Checksum(), Eq(crc32c_t{12345u}));
+  EXPECT_TRUE(rep->refcount.IsOne());
+  CordRepCrc::Destroy(crc);
+}
+
+TEST(CordRepCrc, NewExistingCrcNotShared) {
+  CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+  CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
+  CordRepCrc* new_crc = CordRepCrc::New(crc, MakeCrcCordState(54321));
+  EXPECT_THAT(new_crc, Eq(crc));
+  EXPECT_TRUE(new_crc->refcount.IsOne());
+  EXPECT_THAT(new_crc->child, Eq(rep));
+  EXPECT_THAT(new_crc->crc_cord_state.Checksum(), Eq(crc32c_t{54321u}));
+  EXPECT_TRUE(rep->refcount.IsOne());
+  CordRepCrc::Destroy(new_crc);
+}
+
+TEST(CordRepCrc, NewExistingCrcShared) {
+  CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+  CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
+  CordRep::Ref(crc);
+  CordRepCrc* new_crc = CordRepCrc::New(crc, MakeCrcCordState(54321));
+
+  EXPECT_THAT(new_crc, Ne(crc));
+  EXPECT_TRUE(new_crc->refcount.IsOne());
+  EXPECT_TRUE(crc->refcount.IsOne());
+  EXPECT_FALSE(rep->refcount.IsOne());
+  EXPECT_THAT(crc->child, Eq(rep));
+  EXPECT_THAT(new_crc->child, Eq(rep));
+  EXPECT_THAT(crc->crc_cord_state.Checksum(), Eq(crc32c_t{12345u}));
+  EXPECT_THAT(new_crc->crc_cord_state.Checksum(), Eq(crc32c_t{54321u}));
+
+  CordRep::Unref(crc);
+  CordRep::Unref(new_crc);
+}
+
+TEST(CordRepCrc, NewEmpty) {
+  CordRepCrc* crc = CordRepCrc::New(nullptr, MakeCrcCordState(12345));
+  EXPECT_TRUE(crc->refcount.IsOne());
+  EXPECT_THAT(crc->child, IsNull());
+  EXPECT_THAT(crc->length, Eq(0u));
+  EXPECT_THAT(crc->crc_cord_state.Checksum(), Eq(crc32c_t{12345u}));
+  EXPECT_TRUE(crc->refcount.IsOne());
+  CordRepCrc::Destroy(crc);
+}
+
+TEST(CordRepCrc, RemoveCrcNotCrc) {
+  CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+  CordRep* nocrc = RemoveCrcNode(rep);
+  EXPECT_THAT(nocrc, Eq(rep));
+  CordRep::Unref(nocrc);
+}
+
+TEST(CordRepCrc, RemoveCrcNotShared) {
+  CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+  CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
+  CordRep* nocrc = RemoveCrcNode(crc);
+  EXPECT_THAT(nocrc, Eq(rep));
+  EXPECT_TRUE(rep->refcount.IsOne());
+  CordRep::Unref(nocrc);
+}
+
+TEST(CordRepCrc, RemoveCrcShared) {
+  CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+  CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
+  CordRep::Ref(crc);
+  CordRep* nocrc = RemoveCrcNode(crc);
+  EXPECT_THAT(nocrc, Eq(rep));
+  EXPECT_FALSE(rep->refcount.IsOne());
+  CordRep::Unref(nocrc);
+  CordRep::Unref(crc);
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_flat.h b/abseil-cpp/absl/strings/internal/cord_rep_flat.h
new file mode 100644
index 0000000..27c4b21
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_flat.h
@@ -0,0 +1,195 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Note: none of the constants below are ODR-used; they are internal to cord.
+// We define them as static constexpr to avoid 'in struct' definition and usage
+// clutter.
+
+// Largest and smallest flat node lengths we are willing to allocate.
+// The flat allocation size is stored in the tag, which currently can encode
+// sizes up to 256KiB: as a multiple of 8 bytes up to 512 bytes, of 64 bytes up
+// to 8KiB, and of 4KiB above that (see the conversion functions below).
+// kMinFlatSize is bounded below by the smallest size whose computed tag is
+// still at least FLAT, and ideally is a 'nice' size aligning with allocation
+// and cacheline sizes like 32.
+// kMaxFlatSize is bounded by the size resulting in a computed tag no greater
+// than MAX_FLAT_TAG. MAX_FLAT_TAG provides for additional 'high' tag values.
+static constexpr size_t kFlatOverhead = offsetof(CordRep, storage);
+static constexpr size_t kMinFlatSize = 32;
+static constexpr size_t kMaxFlatSize = 4096;
+static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
+static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
+static constexpr size_t kMaxLargeFlatSize = 256 * 1024;
+static constexpr size_t kMaxLargeFlatLength = kMaxLargeFlatSize - kFlatOverhead;
+
+// kTagBase should make the Size <--> Tag computation resilient
+// against changes to the value of FLAT when we add a new tag.
+static constexpr uint8_t kTagBase = FLAT - 4;
+
+// Converts the provided rounded size to the corresponding tag
+constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
+  return static_cast<uint8_t>(size <= 512 ? kTagBase + size / 8
+                              : size <= 8192
+                                  ? kTagBase + 512 / 8 + size / 64 - 512 / 64
+                                  : kTagBase + 512 / 8 + ((8192 - 512) / 64) +
+                                        size / 4096 - 8192 / 4096);
+}
+
+// Converts the provided tag to the corresponding allocated size
+constexpr size_t TagToAllocatedSize(uint8_t tag) {
+  return (tag <= kTagBase + 512 / 8) ? tag * 8 - kTagBase * 8
+         : (tag <= kTagBase + (512 / 8) + ((8192 - 512) / 64))
+             ? 512 + tag * 64 - kTagBase * 64 - 512 / 8 * 64
+             : 8192 + tag * 4096 - kTagBase * 4096 -
+                   ((512 / 8) + ((8192 - 512) / 64)) * 4096;
+}
+
+static_assert(AllocatedSizeToTagUnchecked(kMinFlatSize) == FLAT, "");
+static_assert(AllocatedSizeToTagUnchecked(kMaxLargeFlatSize) == MAX_FLAT_TAG,
+              "");
+
+// RoundUp logically performs `((n + m - 1) / m) * m` to round up to the nearest
+// multiple of `m`, optimized for the invariant that `m` is a power of 2.
+constexpr size_t RoundUp(size_t n, size_t m) {
+  return (n + m - 1) & (0 - m);
+}
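+
+// For instance (editorial note), because `m` is a power of two the bitmask form
+// above matches the naive ((n + m - 1) / m) * m computation:
+//   RoundUp(34, 8) == 40, RoundUp(513, 64) == 576, RoundUp(4096, 4096) == 4096.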
+
+// Rounds the size up to the nearest equal or larger value that can be
+// expressed exactly as a tag value.
+inline size_t RoundUpForTag(size_t size) {
+  return RoundUp(size, (size <= 512) ? 8 : (size <= 8192 ? 64 : 4096));
+}
+
+// Converts the allocated size to a tag, rounding down if the size
+// does not exactly match a 'tag expressible' size value. The result is
+// undefined if the size exceeds the maximum size that can be encoded in
+// a tag, i.e., if size is larger than TagToAllocatedSize(<max tag>).
+inline uint8_t AllocatedSizeToTag(size_t size) {
+  const uint8_t tag = AllocatedSizeToTagUnchecked(size);
+  assert(tag <= MAX_FLAT_TAG);
+  return tag;
+}
+
+// Converts the provided tag to the corresponding available data length
+constexpr size_t TagToLength(uint8_t tag) {
+  return TagToAllocatedSize(tag) - kFlatOverhead;
+}
+
+// Enforce that the maximum tag value maps back exactly to kMaxLargeFlatSize.
+static_assert(TagToAllocatedSize(MAX_FLAT_TAG) == kMaxLargeFlatSize,
+              "Bad tag logic");
+
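+// Worked example (editorial note) of the size <-> tag mapping above, expressed
+// relative to kTagBase (== FLAT - 4):
+//   AllocatedSizeToTagUnchecked(32)     == kTagBase + 4    (== FLAT)
+//   AllocatedSizeToTagUnchecked(512)    == kTagBase + 64
+//   AllocatedSizeToTagUnchecked(4096)   == kTagBase + 120  (512 < size <= 8192)
+//   AllocatedSizeToTagUnchecked(262144) == kTagBase + 246  (size > 8192)
+// TagToAllocatedSize() inverts each of these exactly, e.g.
+//   TagToAllocatedSize(kTagBase + 120)  == 4096.
+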
+struct CordRepFlat : public CordRep {
+  // Tag for explicit 'large flat' allocation
+  struct Large {};
+
+  // Creates a new flat node.
+  template <size_t max_flat_size, typename... Args>
+  static CordRepFlat* NewImpl(size_t len, Args... args ABSL_ATTRIBUTE_UNUSED) {
+    if (len <= kMinFlatLength) {
+      len = kMinFlatLength;
+    } else if (len > max_flat_size - kFlatOverhead) {
+      len = max_flat_size - kFlatOverhead;
+    }
+
+    // Round size up so it matches a size we can exactly express in a tag.
+    const size_t size = RoundUpForTag(len + kFlatOverhead);
+    void* const raw_rep = ::operator new(size);
+    // GCC 13 has a false-positive -Wstringop-overflow warning here.
+    #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wstringop-overflow"
+    #endif
+    CordRepFlat* rep = new (raw_rep) CordRepFlat();
+    rep->tag = AllocatedSizeToTag(size);
+    #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0)
+    #pragma GCC diagnostic pop
+    #endif
+    return rep;
+  }
+
+  static CordRepFlat* New(size_t len) { return NewImpl<kMaxFlatSize>(len); }
+
+  static CordRepFlat* New(Large, size_t len) {
+    return NewImpl<kMaxLargeFlatSize>(len);
+  }
+
+  // Deletes a CordRepFlat instance created previously through a call to New().
+  // Flat CordReps are allocated and constructed with raw ::operator new and
+  // placement new, and must be destructed and deallocated accordingly.
+  static void Delete(CordRep* rep) {
+    assert(rep->tag >= FLAT && rep->tag <= MAX_FLAT_TAG);
+
+#if defined(__cpp_sized_deallocation)
+    size_t size = TagToAllocatedSize(rep->tag);
+    rep->~CordRep();
+    ::operator delete(rep, size);
+#else
+    rep->~CordRep();
+    ::operator delete(rep);
+#endif
+  }
+
+  // Creates a CordRepFlat containing `data`, with up to `extra` bytes of
+  // additional capacity. Requires that `data.size()` is at most
+  // kMaxFlatLength.
+  static CordRepFlat* Create(absl::string_view data, size_t extra = 0) {
+    assert(data.size() <= kMaxFlatLength);
+    CordRepFlat* flat = New(data.size() + (std::min)(extra, kMaxFlatLength));
+    memcpy(flat->Data(), data.data(), data.size());
+    flat->length = data.size();
+    return flat;
+  }
+
+  // Returns a pointer to the data inside this flat rep.
+  char* Data() { return reinterpret_cast<char*>(storage); }
+  const char* Data() const { return reinterpret_cast<const char*>(storage); }
+
+  // Returns the maximum capacity (payload size) of this instance.
+  size_t Capacity() const { return TagToLength(tag); }
+
+  // Returns the allocated size (payload + overhead) of this instance.
+  size_t AllocatedSize() const { return TagToAllocatedSize(tag); }
+};
+
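+// Illustrative lifecycle sketch (editorial note, not exercised in this file):
+// create a flat with spare capacity, append into it, and release it. Only the
+// members declared above are used:
+//
+//   CordRepFlat* flat = CordRepFlat::Create("hello", /*extra=*/16);
+//   assert(flat->length == 5 && flat->Capacity() >= 5 + 16);
+//   memcpy(flat->Data() + flat->length, " world", 6);
+//   flat->length += 6;
+//   CordRepFlat::Delete(flat);
+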
+// Now that CordRepFlat is defined, we can define CordRep's helper casts:
+inline CordRepFlat* CordRep::flat() {
+  assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
+  return reinterpret_cast<CordRepFlat*>(this);
+}
+
+inline const CordRepFlat* CordRep::flat() const {
+  assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
+  return reinterpret_cast<const CordRepFlat*>(this);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_ring.cc b/abseil-cpp/absl/strings/internal/cord_rep_ring.cc
new file mode 100644
index 0000000..af2fc76
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_ring.cc
@@ -0,0 +1,773 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/strings/internal/cord_rep_ring.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_consume.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+using index_type = CordRepRing::index_type;
+
+enum class Direction { kForward, kReversed };
+
+inline bool IsFlatOrExternal(CordRep* rep) {
+  return rep->IsFlat() || rep->IsExternal();
+}
+
+// Verifies that n + extra <= kMaxCapacity: throws std::length_error otherwise.
+inline void CheckCapacity(size_t n, size_t extra) {
+  if (ABSL_PREDICT_FALSE(extra > CordRepRing::kMaxCapacity - n)) {
+    base_internal::ThrowStdLengthError("Maximum capacity exceeded");
+  }
+}
+
+// Creates a flat from the provided string data, allocating up to `extra`
+// capacity in the returned flat depending on kMaxFlatLength limitations.
+// Requires `n` to be less than or equal to `kMaxFlatLength`.
+CordRepFlat* CreateFlat(const char* s, size_t n, size_t extra = 0) {  // NOLINT
+  assert(n <= kMaxFlatLength);
+  auto* rep = CordRepFlat::New(n + extra);
+  rep->length = n;
+  memcpy(rep->Data(), s, n);
+  return rep;
+}
+
+// Unrefs the entries in `[head, tail)`.
+// Requires all entries to be a FLAT or EXTERNAL node.
+void UnrefEntries(const CordRepRing* rep, index_type head, index_type tail) {
+  rep->ForEach(head, tail, [rep](index_type ix) {
+    CordRep* child = rep->entry_child(ix);
+    if (!child->refcount.Decrement()) {
+      if (child->tag >= FLAT) {
+        CordRepFlat::Delete(child->flat());
+      } else {
+        CordRepExternal::Delete(child->external());
+      }
+    }
+  });
+}
+
+}  // namespace
+
+std::ostream& operator<<(std::ostream& s, const CordRepRing& rep) {
+  // Note: 'pos' values are defined as size_t (for overflow reasons), but that
+  // prints awkwardly for small prepended values such as -5. ssize_t is not
+  // portable (it is POSIX-only), so we cast to ptrdiff_t to print signed values.
+  s << "  CordRepRing(" << &rep << ", length = " << rep.length
+    << ", head = " << rep.head_ << ", tail = " << rep.tail_
+    << ", cap = " << rep.capacity_ << ", rc = " << rep.refcount.Get()
+    << ", begin_pos_ = " << static_cast<ptrdiff_t>(rep.begin_pos_) << ") {\n";
+  CordRepRing::index_type head = rep.head();
+  do {
+    CordRep* child = rep.entry_child(head);
+    s << " entry[" << head << "] length = " << rep.entry_length(head)
+      << ", child " << child << ", clen = " << child->length
+      << ", tag = " << static_cast<int>(child->tag)
+      << ", rc = " << child->refcount.Get()
+      << ", offset = " << rep.entry_data_offset(head)
+      << ", end_pos = " << static_cast<ptrdiff_t>(rep.entry_end_pos(head))
+      << "\n";
+    head = rep.advance(head);
+  } while (head != rep.tail());
+  return s << "}\n";
+}
+
+void CordRepRing::AddDataOffset(index_type index, size_t n) {
+  entry_data_offset()[index] += static_cast<offset_type>(n);
+}
+
+void CordRepRing::SubLength(index_type index, size_t n) {
+  entry_end_pos()[index] -= n;
+}
+
+class CordRepRing::Filler {
+ public:
+  Filler(CordRepRing* rep, index_type pos) : rep_(rep), head_(pos), pos_(pos) {}
+
+  index_type head() const { return head_; }
+  index_type pos() const { return pos_; }
+
+  void Add(CordRep* child, size_t offset, pos_type end_pos) {
+    rep_->entry_end_pos()[pos_] = end_pos;
+    rep_->entry_child()[pos_] = child;
+    rep_->entry_data_offset()[pos_] = static_cast<offset_type>(offset);
+    pos_ = rep_->advance(pos_);
+  }
+
+ private:
+  CordRepRing* rep_;
+  index_type head_;
+  index_type pos_;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordRepRing::kMaxCapacity;
+#endif
+
+bool CordRepRing::IsValid(std::ostream& output) const {
+  if (capacity_ == 0) {
+    output << "capacity == 0";
+    return false;
+  }
+
+  if (head_ >= capacity_ || tail_ >= capacity_) {
+    output << "head " << head_ << " and/or tail " << tail_
+           << " exceed capacity " << capacity_;
+           << capacity_;
+    return false;
+  }
+
+  const index_type back = retreat(tail_);
+  size_t pos_length = Distance(begin_pos_, entry_end_pos(back));
+  if (pos_length != length) {
+    output << "length " << length << " does not match positional length "
+           << pos_length << " from begin_pos " << begin_pos_ << " and entry["
+           << back << "].end_pos " << entry_end_pos(back);
+    return false;
+  }
+
+  index_type head = head_;
+  pos_type begin_pos = begin_pos_;
+  do {
+    pos_type end_pos = entry_end_pos(head);
+    size_t entry_length = Distance(begin_pos, end_pos);
+    if (entry_length == 0) {
+      output << "entry[" << head << "] has an invalid length " << entry_length
+             << " from begin_pos " << begin_pos << " and end_pos " << end_pos;
+      return false;
+    }
+
+    CordRep* child = entry_child(head);
+    if (child == nullptr) {
+      output << "entry[" << head << "].child == nullptr";
+      return false;
+    }
+    if (child->tag < FLAT && child->tag != EXTERNAL) {
+      output << "entry[" << head << "].child has an invalid tag "
+             << static_cast<int>(child->tag);
+      return false;
+    }
+
+    size_t offset = entry_data_offset(head);
+    if (offset >= child->length || entry_length > child->length - offset) {
+      output << "entry[" << head << "] has offset " << offset
+             << " and entry length " << entry_length
+             << " which are outside of the child's length of " << child->length;
+      return false;
+    }
+
+    begin_pos = end_pos;
+    head = advance(head);
+  } while (head != tail_);
+
+  return true;
+}
+
+#ifdef EXTRA_CORD_RING_VALIDATION
+CordRepRing* CordRepRing::Validate(CordRepRing* rep, const char* file,
+                                   int line) {
+  if (!rep->IsValid(std::cerr)) {
+    std::cerr << "\nERROR: CordRepRing corrupted";
+    if (line) std::cerr << " at line " << line;
+    if (file) std::cerr << " in file " << file;
+    std::cerr << "\nContent = " << *rep;
+    abort();
+  }
+  return rep;
+}
+#endif  // EXTRA_CORD_RING_VALIDATION
+
+CordRepRing* CordRepRing::New(size_t capacity, size_t extra) {
+  CheckCapacity(capacity, extra);
+
+  size_t size = AllocSize(capacity += extra);
+  void* mem = ::operator new(size);
+  auto* rep = new (mem) CordRepRing(static_cast<index_type>(capacity));
+  rep->tag = RING;
+  rep->capacity_ = static_cast<index_type>(capacity);
+  rep->begin_pos_ = 0;
+  return rep;
+}
+
+void CordRepRing::SetCapacityForTesting(size_t capacity) {
+  // Adjust for the changed layout
+  assert(capacity <= capacity_);
+  assert(head() == 0 || head() < tail());
+  memmove(Layout::Partial(capacity).Pointer<1>(data_) + head(),
+          Layout::Partial(capacity_).Pointer<1>(data_) + head(),
+          entries() * sizeof(Layout::ElementType<1>));
+  memmove(Layout::Partial(capacity, capacity).Pointer<2>(data_) + head(),
+          Layout::Partial(capacity_, capacity_).Pointer<2>(data_) + head(),
+          entries() * sizeof(Layout::ElementType<2>));
+  capacity_ = static_cast<index_type>(capacity);
+}
+
+void CordRepRing::Delete(CordRepRing* rep) {
+  assert(rep != nullptr && rep->IsRing());
+#if defined(__cpp_sized_deallocation)
+  size_t size = AllocSize(rep->capacity_);
+  rep->~CordRepRing();
+  ::operator delete(rep, size);
+#else
+  rep->~CordRepRing();
+  ::operator delete(rep);
+#endif
+}
+
+void CordRepRing::Destroy(CordRepRing* rep) {
+  UnrefEntries(rep, rep->head(), rep->tail());
+  Delete(rep);
+}
+
+template <bool ref>
+void CordRepRing::Fill(const CordRepRing* src, index_type head,
+                       index_type tail) {
+  this->length = src->length;
+  head_ = 0;
+  tail_ = advance(0, src->entries(head, tail));
+  begin_pos_ = src->begin_pos_;
+
+  // TODO(mvels): there may be opportunities here for large buffers.
+  auto* dst_pos = entry_end_pos();
+  auto* dst_child = entry_child();
+  auto* dst_offset = entry_data_offset();
+  src->ForEach(head, tail, [&](index_type index) {
+    *dst_pos++ = src->entry_end_pos(index);
+    CordRep* child = src->entry_child(index);
+    *dst_child++ = ref ? CordRep::Ref(child) : child;
+    *dst_offset++ = src->entry_data_offset(index);
+  });
+}
+
+CordRepRing* CordRepRing::Copy(CordRepRing* rep, index_type head,
+                               index_type tail, size_t extra) {
+  CordRepRing* newrep = CordRepRing::New(rep->entries(head, tail), extra);
+  newrep->Fill<true>(rep, head, tail);
+  CordRep::Unref(rep);
+  return newrep;
+}
+
+CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
+  // Get current number of entries, and check for max capacity.
+  size_t entries = rep->entries();
+
+  if (!rep->refcount.IsOne()) {
+    return Copy(rep, rep->head(), rep->tail(), extra);
+  } else if (entries + extra > rep->capacity()) {
+    const size_t min_grow = rep->capacity() + rep->capacity() / 2;
+    const size_t min_extra = (std::max)(extra, min_grow - entries);
+    CordRepRing* newrep = CordRepRing::New(entries, min_extra);
+    newrep->Fill<false>(rep, rep->head(), rep->tail());
+    CordRepRing::Delete(rep);
+    return newrep;
+  } else {
+    return rep;
+  }
+}
+
+Span<char> CordRepRing::GetAppendBuffer(size_t size) {
+  assert(refcount.IsOne());
+  index_type back = retreat(tail_);
+  CordRep* child = entry_child(back);
+  if (child->tag >= FLAT && child->refcount.IsOne()) {
+    size_t capacity = child->flat()->Capacity();
+    pos_type end_pos = entry_end_pos(back);
+    size_t data_offset = entry_data_offset(back);
+    size_t entry_length = Distance(entry_begin_pos(back), end_pos);
+    size_t used = data_offset + entry_length;
+    if (size_t n = (std::min)(capacity - used, size)) {
+      child->length = data_offset + entry_length + n;
+      entry_end_pos()[back] = end_pos + n;
+      this->length += n;
+      return {child->flat()->Data() + used, n};
+    }
+  }
+  return {nullptr, 0};
+}
+
+Span<char> CordRepRing::GetPrependBuffer(size_t size) {
+  assert(refcount.IsOne());
+  CordRep* child = entry_child(head_);
+  size_t data_offset = entry_data_offset(head_);
+  if (data_offset && child->refcount.IsOne() && child->tag >= FLAT) {
+    size_t n = (std::min)(data_offset, size);
+    this->length += n;
+    begin_pos_ -= n;
+    data_offset -= n;
+    entry_data_offset()[head_] = static_cast<offset_type>(data_offset);
+    return {child->flat()->Data() + data_offset, n};
+  }
+  return {nullptr, 0};
+}
+
+CordRepRing* CordRepRing::CreateFromLeaf(CordRep* child, size_t offset,
+                                         size_t len, size_t extra) {
+  CordRepRing* rep = CordRepRing::New(1, extra);
+  rep->head_ = 0;
+  rep->tail_ = rep->advance(0);
+  rep->length = len;
+  rep->entry_end_pos()[0] = len;
+  rep->entry_child()[0] = child;
+  rep->entry_data_offset()[0] = static_cast<offset_type>(offset);
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::CreateSlow(CordRep* child, size_t extra) {
+  CordRepRing* rep = nullptr;
+  Consume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
+    if (IsFlatOrExternal(child_arg)) {
+      rep = rep ? AppendLeaf(rep, child_arg, offset, len)
+                : CreateFromLeaf(child_arg, offset, len, extra);
+    } else if (rep) {
+      rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
+    } else if (offset == 0 && child_arg->length == len) {
+      rep = Mutable(child_arg->ring(), extra);
+    } else {
+      rep = SubRing(child_arg->ring(), offset, len, extra);
+    }
+  });
+  return Validate(rep, nullptr, __LINE__);
+}
+
+CordRepRing* CordRepRing::Create(CordRep* child, size_t extra) {
+  size_t length = child->length;
+  if (IsFlatOrExternal(child)) {
+    return CreateFromLeaf(child, 0, length, extra);
+  }
+  if (child->IsRing()) {
+    return Mutable(child->ring(), extra);
+  }
+  return CreateSlow(child, extra);
+}
+
+template <CordRepRing::AddMode mode>
+CordRepRing* CordRepRing::AddRing(CordRepRing* rep, CordRepRing* ring,
+                                  size_t offset, size_t len) {
+  assert(offset < ring->length);
+  constexpr bool append = mode == AddMode::kAppend;
+  Position head = ring->Find(offset);
+  Position tail = ring->FindTail(head.index, offset + len);
+  const index_type entries = ring->entries(head.index, tail.index);
+
+  rep = Mutable(rep, entries);
+
+  // The delta for making ring[head].end_pos into 'len - offset'
+  const pos_type delta_length =
+      (append ? rep->begin_pos_ + rep->length : rep->begin_pos_ - len) -
+      ring->entry_begin_pos(head.index) - head.offset;
+
+  // Start filling at `tail`, or `entries` before `head`
+  Filler filler(rep, append ? rep->tail_ : rep->retreat(rep->head_, entries));
+
+  if (ring->refcount.IsOne()) {
+    // Copy entries from source stealing the ref and adjusting the end position.
+    // Commit the filler as this is a no-op.
+    ring->ForEach(head.index, tail.index, [&](index_type ix) {
+      filler.Add(ring->entry_child(ix), ring->entry_data_offset(ix),
+                 ring->entry_end_pos(ix) + delta_length);
+    });
+
+    // Unref entries we did not copy over, and delete source.
+    if (head.index != ring->head_) UnrefEntries(ring, ring->head_, head.index);
+    if (tail.index != ring->tail_) UnrefEntries(ring, tail.index, ring->tail_);
+    CordRepRing::Delete(ring);
+  } else {
+    ring->ForEach(head.index, tail.index, [&](index_type ix) {
+      CordRep* child = ring->entry_child(ix);
+      filler.Add(child, ring->entry_data_offset(ix),
+                 ring->entry_end_pos(ix) + delta_length);
+      CordRep::Ref(child);
+    });
+    CordRepRing::Unref(ring);
+  }
+
+  if (head.offset) {
+    // Increase offset of first 'source' entry appended or prepended.
+    // This is always the entry in `filler.head()`
+    rep->AddDataOffset(filler.head(), head.offset);
+  }
+
+  if (tail.offset) {
+    // Reduce length of last 'source' entry appended or prepended.
+    // This is always the entry tailed by `filler.pos()`
+    rep->SubLength(rep->retreat(filler.pos()), tail.offset);
+  }
+
+  // Commit changes
+  rep->length += len;
+  if (append) {
+    rep->tail_ = filler.pos();
+  } else {
+    rep->head_ = filler.head();
+    rep->begin_pos_ -= len;
+  }
+
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::AppendSlow(CordRepRing* rep, CordRep* child) {
+  Consume(child, [&rep](CordRep* child_arg, size_t offset, size_t len) {
+    if (child_arg->IsRing()) {
+      rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
+    } else {
+      rep = AppendLeaf(rep, child_arg, offset, len);
+    }
+  });
+  return rep;
+}
+
+CordRepRing* CordRepRing::AppendLeaf(CordRepRing* rep, CordRep* child,
+                                     size_t offset, size_t len) {
+  rep = Mutable(rep, 1);
+  index_type back = rep->tail_;
+  const pos_type begin_pos = rep->begin_pos_ + rep->length;
+  rep->tail_ = rep->advance(rep->tail_);
+  rep->length += len;
+  rep->entry_end_pos()[back] = begin_pos + len;
+  rep->entry_child()[back] = child;
+  rep->entry_data_offset()[back] = static_cast<offset_type>(offset);
+  return Validate(rep, nullptr, __LINE__);
+}
+
+CordRepRing* CordRepRing::Append(CordRepRing* rep, CordRep* child) {
+  size_t length = child->length;
+  if (IsFlatOrExternal(child)) {
+    return AppendLeaf(rep, child, 0, length);
+  }
+  if (child->IsRing()) {
+    return AddRing<AddMode::kAppend>(rep, child->ring(), 0, length);
+  }
+  return AppendSlow(rep, child);
+}
+
+CordRepRing* CordRepRing::PrependSlow(CordRepRing* rep, CordRep* child) {
+  ReverseConsume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
+    if (IsFlatOrExternal(child_arg)) {
+      rep = PrependLeaf(rep, child_arg, offset, len);
+    } else {
+      rep = AddRing<AddMode::kPrepend>(rep, child_arg->ring(), offset, len);
+    }
+  });
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::PrependLeaf(CordRepRing* rep, CordRep* child,
+                                      size_t offset, size_t len) {
+  rep = Mutable(rep, 1);
+  index_type head = rep->retreat(rep->head_);
+  pos_type end_pos = rep->begin_pos_;
+  rep->head_ = head;
+  rep->length += len;
+  rep->begin_pos_ -= len;
+  rep->entry_end_pos()[head] = end_pos;
+  rep->entry_child()[head] = child;
+  rep->entry_data_offset()[head] = static_cast<offset_type>(offset);
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::Prepend(CordRepRing* rep, CordRep* child) {
+  size_t length = child->length;
+  if (IsFlatOrExternal(child)) {
+    return PrependLeaf(rep, child, 0, length);
+  }
+  if (child->IsRing()) {
+    return AddRing<AddMode::kPrepend>(rep, child->ring(), 0, length);
+  }
+  return PrependSlow(rep, child);
+}
+
+CordRepRing* CordRepRing::Append(CordRepRing* rep, absl::string_view data,
+                                 size_t extra) {
+  if (rep->refcount.IsOne()) {
+    Span<char> avail = rep->GetAppendBuffer(data.length());
+    if (!avail.empty()) {
+      memcpy(avail.data(), data.data(), avail.length());
+      data.remove_prefix(avail.length());
+    }
+  }
+  if (data.empty()) return Validate(rep);
+
+  const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
+  rep = Mutable(rep, flats);
+
+  Filler filler(rep, rep->tail_);
+  pos_type pos = rep->begin_pos_ + rep->length;
+
+  while (data.length() >= kMaxFlatLength) {
+    auto* flat = CreateFlat(data.data(), kMaxFlatLength);
+    filler.Add(flat, 0, pos += kMaxFlatLength);
+    data.remove_prefix(kMaxFlatLength);
+  }
+
+  if (data.length()) {
+    auto* flat = CreateFlat(data.data(), data.length(), extra);
+    filler.Add(flat, 0, pos += data.length());
+  }
+
+  rep->length = pos - rep->begin_pos_;
+  rep->tail_ = filler.pos();
+
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::Prepend(CordRepRing* rep, absl::string_view data,
+                                  size_t extra) {
+  if (rep->refcount.IsOne()) {
+    Span<char> avail = rep->GetPrependBuffer(data.length());
+    if (!avail.empty()) {
+      const char* tail = data.data() + data.length() - avail.length();
+      memcpy(avail.data(), tail, avail.length());
+      data.remove_suffix(avail.length());
+    }
+  }
+  if (data.empty()) return rep;
+
+  const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
+  rep = Mutable(rep, flats);
+  pos_type pos = rep->begin_pos_;
+  Filler filler(rep, rep->retreat(rep->head_, static_cast<index_type>(flats)));
+
+  size_t first_size = data.size() - (flats - 1) * kMaxFlatLength;
+  CordRepFlat* flat = CordRepFlat::New(first_size + extra);
+  flat->length = first_size + extra;
+  memcpy(flat->Data() + extra, data.data(), first_size);
+  data.remove_prefix(first_size);
+  filler.Add(flat, extra, pos);
+  pos -= first_size;
+
+  while (!data.empty()) {
+    assert(data.size() >= kMaxFlatLength);
+    flat = CreateFlat(data.data(), kMaxFlatLength);
+    filler.Add(flat, 0, pos);
+    pos -= kMaxFlatLength;
+    data.remove_prefix(kMaxFlatLength);
+  }
+
+  rep->head_ = filler.head();
+  rep->length += rep->begin_pos_ - pos;
+  rep->begin_pos_ = pos;
+
+  return Validate(rep);
+}
+
+// 32 entries is 32 * sizeof(pos_type) = 4 cache lines on x86
+static constexpr index_type kBinarySearchThreshold = 32;
+static constexpr index_type kBinarySearchEndCount = 8;
+
+template <bool wrap>
+CordRepRing::index_type CordRepRing::FindBinary(index_type head,
+                                                index_type tail,
+                                                size_t offset) const {
+  index_type count = tail + (wrap ? capacity_ : 0) - head;
+  do {
+    count = (count - 1) / 2;
+    assert(count < entries(head, tail_));
+    index_type mid = wrap ? advance(head, count) : head + count;
+    index_type after_mid = wrap ? advance(mid) : mid + 1;
+    bool larger = (offset >= entry_end_offset(mid));
+    head = larger ? after_mid : head;
+    tail = larger ? tail : mid;
+    assert(head != tail);
+  } while (ABSL_PREDICT_TRUE(count > kBinarySearchEndCount));
+  return head;
+}
+
+CordRepRing::Position CordRepRing::FindSlow(index_type head,
+                                            size_t offset) const {
+  index_type tail = tail_;
+
+  // Binary search until we are good for linear search
+  // Optimize for branchless / non wrapping ops
+  if (tail > head) {
+    index_type count = tail - head;
+    if (count > kBinarySearchThreshold) {
+      head = FindBinary<false>(head, tail, offset);
+    }
+  } else {
+    index_type count = capacity_ + tail - head;
+    if (count > kBinarySearchThreshold) {
+      head = FindBinary<true>(head, tail, offset);
+    }
+  }
+
+  pos_type pos = entry_begin_pos(head);
+  pos_type end_pos = entry_end_pos(head);
+  while (offset >= Distance(begin_pos_, end_pos)) {
+    head = advance(head);
+    pos = end_pos;
+    end_pos = entry_end_pos(head);
+  }
+
+  return {head, offset - Distance(begin_pos_, pos)};
+}
+
+CordRepRing::Position CordRepRing::FindTailSlow(index_type head,
+                                                size_t offset) const {
+  index_type tail = tail_;
+  const size_t tail_offset = offset - 1;
+
+  // Binary search until we are good for linear search
+  // Optimize for branchless / non wrapping ops
+  if (tail > head) {
+    index_type count = tail - head;
+    if (count > kBinarySearchThreshold) {
+      head = FindBinary<false>(head, tail, tail_offset);
+    }
+  } else {
+    index_type count = capacity_ + tail - head;
+    if (count > kBinarySearchThreshold) {
+      head = FindBinary<true>(head, tail, tail_offset);
+    }
+  }
+
+  size_t end_offset = entry_end_offset(head);
+  while (tail_offset >= end_offset) {
+    head = advance(head);
+    end_offset = entry_end_offset(head);
+  }
+
+  return {advance(head), end_offset - offset};
+}
+
+char CordRepRing::GetCharacter(size_t offset) const {
+  assert(offset < length);
+
+  Position pos = Find(offset);
+  size_t data_offset = entry_data_offset(pos.index) + pos.offset;
+  return GetRepData(entry_child(pos.index))[data_offset];
+}
+
+CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
+                                  size_t len, size_t extra) {
+  assert(offset <= rep->length);
+  assert(offset <= rep->length - len);
+
+  if (len == 0) {
+    CordRep::Unref(rep);
+    return nullptr;
+  }
+
+  // Find position of first byte
+  Position head = rep->Find(offset);
+  Position tail = rep->FindTail(head.index, offset + len);
+  const size_t new_entries = rep->entries(head.index, tail.index);
+
+  if (rep->refcount.IsOne() && extra <= (rep->capacity() - new_entries)) {
+    // We adopt a privately owned rep, and no extra entries are needed.
+    if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
+    if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
+    rep->head_ = head.index;
+    rep->tail_ = tail.index;
+  } else {
+    // Copy subset to new rep
+    rep = Copy(rep, head.index, tail.index, extra);
+    head.index = rep->head_;
+    tail.index = rep->tail_;
+  }
+
+  // Adjust begin_pos and length
+  rep->length = len;
+  rep->begin_pos_ += offset;
+
+  // Adjust head and tail blocks
+  if (head.offset) {
+    rep->AddDataOffset(head.index, head.offset);
+  }
+  if (tail.offset) {
+    rep->SubLength(rep->retreat(tail.index), tail.offset);
+  }
+
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::RemovePrefix(CordRepRing* rep, size_t len,
+                                       size_t extra) {
+  assert(len <= rep->length);
+  if (len == rep->length) {
+    CordRep::Unref(rep);
+    return nullptr;
+  }
+
+  Position head = rep->Find(len);
+  if (rep->refcount.IsOne()) {
+    if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
+    rep->head_ = head.index;
+  } else {
+    rep = Copy(rep, head.index, rep->tail_, extra);
+    head.index = rep->head_;
+  }
+
+  // Adjust begin_pos and length
+  rep->length -= len;
+  rep->begin_pos_ += len;
+
+  // Adjust head block
+  if (head.offset) {
+    rep->AddDataOffset(head.index, head.offset);
+  }
+
+  return Validate(rep);
+}
+
+CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
+                                       size_t extra) {
+  assert(len <= rep->length);
+
+  if (len == rep->length) {
+    CordRep::Unref(rep);
+    return nullptr;
+  }
+
+  Position tail = rep->FindTail(rep->length - len);
+  if (rep->refcount.IsOne()) {
+    // We adopt a privately owned rep, scrub.
+    if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
+    rep->tail_ = tail.index;
+  } else {
+    // Copy subset to new rep
+    rep = Copy(rep, rep->head_, tail.index, extra);
+    tail.index = rep->tail_;
+  }
+
+  // Adjust length
+  rep->length -= len;
+
+  // Adjust tail block
+  if (tail.offset) {
+    rep->SubLength(rep->retreat(tail.index), tail.offset);
+  }
+
+  return Validate(rep);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_ring.h b/abseil-cpp/absl/strings/internal/cord_rep_ring.h
new file mode 100644
index 0000000..79a2fdb
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_ring.h
@@ -0,0 +1,607 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+
+#include "absl/container/internal/layout.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// All operations modifying a ring buffer are implemented as static methods
+// requiring a CordRepRing instance with a reference adopted by the method.
+//
+// The methods return the modified ring buffer, which may be equal to the input
+// if the input was not shared and has enough capacity to accommodate any newly
+// added node(s). Otherwise, a copy of the input rep with the new node(s) added
+// is returned.
+//
+// Any modification of non-shared ring buffers with enough capacity therefore
+// requires a minimum of atomic operations. Callers should, where possible,
+// provide reasonable `extra` hints for both anticipated extra `flat` byte
+// space and anticipated extra nodes required for complex operations.
+//
+// Example of code creating a ring buffer, adding some data to it,
+// and discarding the buffer when done:
+//
+//   void FunWithRings() {
+//     // Create ring with 3 flats
+//     CordRep* flat = CreateFlat("Hello");
+//     CordRepRing* ring = CordRepRing::Create(flat, 2);
+//     ring = CordRepRing::Append(ring, CreateFlat(" "));
+//     ring = CordRepRing::Append(ring, CreateFlat("world"));
+//     DoSomethingWithRing(ring);
+//     CordRep::Unref(ring);
+//   }
+//
+// Example of code copying an existing ring buffer and modifying it:
+//
+//   void MoreFunWithRings(CordRepRing* src) {
+//     CordRepRing* ring = CordRep::Ref(src)->ring();
+//     ring = CordRepRing::Append(ring, CreateFlat("Hello"));
+//     ring = CordRepRing::Append(ring, CreateFlat(" "));
+//     ring = CordRepRing::Append(ring, CreateFlat("world"));
+//     DoSomethingWithRing(ring);
+//     CordRep::Unref(ring);
+//   }
+//
+class CordRepRing : public CordRep {
+ public:
+  // `pos_type` represents a 'logical position'. A CordRepRing instance has a
+  // `begin_pos` (default 0), and each node inside the buffer will have an
+  // `end_pos` which is the `end_pos` of the previous node (or `begin_pos`) plus
+  // this node's length. The purpose is to allow for a binary search on this
+  // position, while allowing O(1) prepend and append operations.
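+  //
+  // For example (editorial note): a ring holding three entries of lengths 5, 3
+  // and 4 with `begin_pos` 100 stores end positions 105, 108 and 112, and its
+  // total length is Distance(100, 112) == 12.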
+  using pos_type = size_t;
+
+  // `index_type` is the type for the `head`, `tail` and `capacity` indexes.
+  // Ring buffers are limited to having no more than four billion entries.
+  using index_type = uint32_t;
+
+  // `offset_type` is the type for the data offset inside a child rep's data.
+  using offset_type = uint32_t;
+
+  // Position holds the node index and relative offset into the node for
+  // some physical offset in the contained data as returned by the Find()
+  // and FindTail() methods.
+  struct Position {
+    index_type index;
+    size_t offset;
+  };
+
+  // The maximum # of child nodes that can be hosted inside a CordRepRing.
+  static constexpr size_t kMaxCapacity = (std::numeric_limits<uint32_t>::max)();
+
+  // CordRepRing cannot be default constructed, moved, copied or assigned.
+  CordRepRing() = delete;
+  CordRepRing(const CordRepRing&) = delete;
+  CordRepRing& operator=(const CordRepRing&) = delete;
+
+  // Returns true if this instance is valid, false if some or all of the
+  // invariants are broken. Intended for debug purposes only.
+  // `output` receives an explanation of the broken invariants.
+  bool IsValid(std::ostream& output) const;
+
+  // Returns the size in bytes for a CordRepRing with `capacity` entries.
+  static constexpr size_t AllocSize(size_t capacity);
+
+  // Returns the distance in bytes from `pos` to `end_pos`.
+  static constexpr size_t Distance(pos_type pos, pos_type end_pos);
+
+  // Creates a new ring buffer from the provided `child`. Adopts a reference
+  // on `child`. The returned ring buffer has a capacity of at least `extra + 1`.
+  static CordRepRing* Create(CordRep* child, size_t extra = 0);
+
+  // `head`, `tail` and `capacity` indexes defining the ring buffer boundaries.
+  index_type head() const { return head_; }
+  index_type tail() const { return tail_; }
+  index_type capacity() const { return capacity_; }
+
+  // Returns the number of entries in this instance.
+  index_type entries() const { return entries(head_, tail_); }
+
+  // Returns the logical begin position of this instance.
+  pos_type begin_pos() const { return begin_pos_; }
+
+  // Returns the number of entries for a given head-tail range.
+  // Requires `head` and `tail` values to be less than `capacity()`.
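+  // For example (editorial note), with capacity() == 8, entries(6, 2) == 4:
+  // the occupied slots are 6, 7, 0 and 1, wrapping around the buffer.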
+  index_type entries(index_type head, index_type tail) const {
+    assert(head < capacity_ && tail < capacity_);
+    return tail - head + ((tail > head) ? 0 : capacity_);
+  }
+
+  // Returns the logical end position of entry `index`.
+  pos_type const& entry_end_pos(index_type index) const {
+    assert(IsValidIndex(index));
+    return Layout::Partial().Pointer<0>(data_)[index];
+  }
+
+  // Returns the child pointer of entry `index`.
+  CordRep* const& entry_child(index_type index) const {
+    assert(IsValidIndex(index));
+    return Layout::Partial(capacity()).Pointer<1>(data_)[index];
+  }
+
+  // Returns the data offset of entry `index`
+  offset_type const& entry_data_offset(index_type index) const {
+    assert(IsValidIndex(index));
+    return Layout::Partial(capacity(), capacity()).Pointer<2>(data_)[index];
+  }
+
+  // Appends the provided child node to the `rep` instance.
+  // Adopts a reference from `rep` and `child` which may not be null.
+  // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
+  // containing a FLAT or EXTERNAL node, then the flat or external node is added
+  // 'as is', with an offset added for the SUBSTRING case.
+  // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING or
+  // CONCAT tree, then all child nodes not excluded by any start offset or
+  // length values are added recursively.
+  static CordRepRing* Append(CordRepRing* rep, CordRep* child);
+
+  // Appends the provided string data to the `rep` instance.
+  // This function will attempt to utilize any remaining capacity in the last
+  // node of the input if that node is not shared (directly or indirectly), and
+  // of type FLAT. Remaining data will be added as one or more FLAT nodes.
+  // Any last node added to the ring buffer will be allocated with up to
+  // `extra` bytes of capacity for (anticipated) subsequent append actions.
+  static CordRepRing* Append(CordRepRing* rep, string_view data,
+                             size_t extra = 0);
+
+  // Prepends the provided child node to the `rep` instance.
+  // Adopts a reference from `rep` and `child` which may not be null.
+  // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
+  // containing a FLAT or EXTERNAL node, then the flat or external node is
+  // prepended 'as is', with an optional offset added for the SUBSTRING case.
+  // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING
+  // or CONCAT tree, then all child nodes not excluded by any start offset or
+  // length values are added recursively.
+  static CordRepRing* Prepend(CordRepRing* rep, CordRep* child);
+
+  // Prepends the provided string data to the `rep` instance.
+  // This function will attempt to utilize any remaining capacity in the first
+  // node of the input if that node is not shared (directly or indirectly), and
+  // of type FLAT. Remaining data will be added as one or more FLAT nodes.
+  // Any first node prepended to the ring buffer will be allocated with up to
+  // `extra` bytes of capacity for (anticipated) subsequent prepend actions.
+  static CordRepRing* Prepend(CordRepRing* rep, string_view data,
+                              size_t extra = 0);
+
+  // Returns a span referencing potentially unused capacity in the last node.
+  // The returned span may be empty if no such capacity is available, or if the
+  // current instance is shared. Else, a span of size `n <= size` is returned.
+  // If non empty, the ring buffer is adjusted to the new length, with the newly
+  // added capacity left uninitialized. Callers should assign a value to the
+  // entire span before any other operations on this instance.
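+  //
+  // Illustrative use (editorial sketch, assuming a non-shared `ring` and an
+  // absl::string_view `src` to copy from), mirroring the string Append
+  // overload:
+  //
+  //   Span<char> avail = ring->GetAppendBuffer(src.length());
+  //   if (!avail.empty()) {
+  //     memcpy(avail.data(), src.data(), avail.length());
+  //     src.remove_prefix(avail.length());
+  //   }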
+  Span<char> GetAppendBuffer(size_t size);
+
+  // Returns a span referencing potentially unused capacity in the first node.
+  // This function is identical to GetAppendBuffer except that it returns a span
+  // referencing up to `size` capacity directly before the existing data.
+  Span<char> GetPrependBuffer(size_t size);
+
+  // Returns a cord ring buffer containing `len` bytes of data starting at
+  // `offset`. If the input is not shared, this function will remove all head
+  // and tail child nodes outside of the requested range, and adjust the new
+  // head and tail nodes as required. If the input is shared, this function
+  // returns a new instance sharing some or all of the nodes from the input.
+  static CordRepRing* SubRing(CordRepRing* r, size_t offset, size_t len,
+                              size_t extra = 0);
+
+  // Returns a cord ring buffer with the last `len` bytes removed.
+  // If the input is not shared, this function will remove all tail child nodes
+  // fully inside the last `len` bytes, and adjust the new tail as required.
+  // If the input is shared, this function returns a new instance sharing some
+  // or all of the nodes from the input.
+  static CordRepRing* RemoveSuffix(CordRepRing* r, size_t len,
+                                   size_t extra = 0);
+
+  // Returns a cord ring buffer with the first `len` bytes removed.
+  // If the input is not shared, this function will remove all head child nodes
+  // fully inside the first `len` bytes, and adjust the new head as required.
+  // If the input is shared, this function returns a new instance sharing some
+  // or all of the nodes from the input.
+  static CordRepRing* RemovePrefix(CordRepRing* r, size_t len,
+                                   size_t extra = 0);
+
+  // Returns the character at `offset`. Requires that `offset < length`.
+  char GetCharacter(size_t offset) const;
+
+  // Returns true if this instance manages a single contiguous buffer, in which
+  // case the (optional) output parameter `fragment` is set. Otherwise, the
+  // function returns false, and `fragment` is left unchanged.
+  bool IsFlat(absl::string_view* fragment) const;
+
+  // Returns true if the data starting at `offset` with length `len` is
+  // managed by this instance inside a single contiguous buffer, in which case
+  // the (optional) output parameter `fragment` is set to the contiguous memory
+  // starting at offset `offset` with length `len`. Otherwise, the function
+  // returns false, and `fragment` is left unchanged.
+  bool IsFlat(size_t offset, size_t len, absl::string_view* fragment) const;
+
+  // Testing only: set capacity to requested capacity.
+  void SetCapacityForTesting(size_t capacity);
+
+  // Returns the CordRep data pointer for the provided CordRep.
+  // Requires that the provided `rep` is either a FLAT or EXTERNAL CordRep.
+  static const char* GetLeafData(const CordRep* rep);
+
+  // Returns the CordRep data pointer for the provided CordRep.
+  // Requires that `rep` is either a FLAT, EXTERNAL, or SUBSTRING CordRep.
+  static const char* GetRepData(const CordRep* rep);
+
+  // Advances the provided position, wrapping around capacity as needed.
+  // Requires `index` < capacity()
+  inline index_type advance(index_type index) const;
+
+  // Advances the provided position by 'n`, wrapping around capacity as needed.
+  // Requires `index` < capacity() and `n` <= capacity.
+  inline index_type advance(index_type index, index_type n) const;
+
+  // Retreats the provided position, wrapping around 0 as needed.
+  // Requires `index` < capacity()
+  inline index_type retreat(index_type index) const;
+
+  // Retreats the provided position by 'n', wrapping around 0 as needed.
+  // Requires `index` < capacity()
+  inline index_type retreat(index_type index, index_type n) const;
+
+  // Returns the logical begin position of entry `index`
+  pos_type const& entry_begin_pos(index_type index) const {
+    return (index == head_) ? begin_pos_ : entry_end_pos(retreat(index));
+  }
+
+  // Returns the physical start offset of entry `index`
+  size_t entry_start_offset(index_type index) const {
+    return Distance(begin_pos_, entry_begin_pos(index));
+  }
+
+  // Returns the physical end offset of entry `index`
+  size_t entry_end_offset(index_type index) const {
+    return Distance(begin_pos_, entry_end_pos(index));
+  }
+
+  // Returns the data length for entry `index`
+  size_t entry_length(index_type index) const {
+    return Distance(entry_begin_pos(index), entry_end_pos(index));
+  }
+
+  // Returns the data for entry `index`
+  absl::string_view entry_data(index_type index) const;
+
+  // Returns the position for `offset` as {index, prefix}. `index` holds the
+  // index of the entry at the specified offset and `prefix` holds the relative
+  // offset inside that entry.
+  // Requires `offset` < length.
+  //
+  // For example, we can implement GetCharacter(offset) as:
+  //   char GetCharacter(size_t offset) {
+  //     Position pos = this->Find(offset);
+  //     return this->entry_data(pos.index)[pos.offset];
+  //   }
+  inline Position Find(size_t offset) const;
+
+  // Find starting at `head`
+  inline Position Find(index_type head, size_t offset) const;
+
+  // Returns the tail position for `offset` as {tail index, suffix}.
+  // `tail index` holds the index of the entry holding the offset directly
+  // before `offset`, advanced by one. `suffix` holds the distance from
+  // `offset` to the end of that entry.
+  // For example, FindTail(length) will return {tail(), 0}, and
+  // FindTail(length - 5) will return {retreat(tail), 5} provided the preceding
+  // entry contains at least 5 bytes of data.
+  // Requires offset >= 1 && offset <= length.
+  //
+  // This function is very useful in functions that need to clip the end of some
+  // ring buffer such as 'RemoveSuffix'.
+  // For example, we could implement RemoveSuffix for non-shared instances as:
+  //   void RemoveSuffix(size_t n) {
+  //     Position pos = FindTail(length - n);
+  //     UnrefEntries(pos.index, this->tail_);
+  //     this->tail_ = pos.index;
+  //     SubLength(retreat(pos.index), pos.offset);
+  //   }
+  inline Position FindTail(size_t offset) const;
+
+  // Find tail starting at `head`
+  inline Position FindTail(index_type head, size_t offset) const;
+
+  // Invokes f(index_type index) for each entry inside the range [head, tail).
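+  //
+  // For example (editorial sketch), counting the entries in that range:
+  //
+  //   size_t count = 0;
+  //   rep->ForEach(head, tail, [&](index_type) { ++count; });
+  //   assert(count == rep->entries(head, tail));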
+  template <typename F>
+  void ForEach(index_type head, index_type tail, F&& f) const {
+    index_type n1 = (tail > head) ? tail : capacity_;
+    for (index_type i = head; i < n1; ++i) f(i);
+    if (tail <= head) {
+      for (index_type i = 0; i < tail; ++i) f(i);
+    }
+  }
+
+  // Invokes f(index_type index) for each entry inside this instance.
+  template <typename F>
+  void ForEach(F&& f) const {
+    ForEach(head_, tail_, std::forward<F>(f));
+  }
+
+  // Dumps this instance's data to stream `s` in human-readable format, excluding
+  // the actual data content itself. Intended for debug purposes only.
+  friend std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
+
+ private:
+  enum class AddMode { kAppend, kPrepend };
+
+  using Layout = container_internal::Layout<pos_type, CordRep*, offset_type>;
+
+  class Filler;
+  class Transaction;
+  class CreateTransaction;
+
+  static constexpr size_t kLayoutAlignment = Layout::Partial().Alignment();
+
+  // Creates a new CordRepRing.
+  explicit CordRepRing(index_type capacity) : capacity_(capacity) {}
+
+  // Returns true if `index` is a valid index into this instance.
+  bool IsValidIndex(index_type index) const;
+
+  // Debug use only: validates the provided CordRepRing invariants.
+  // Verification of all CordRepRing methods can be enabled by defining
+  // EXTRA_CORD_RING_VALIDATION, e.g.: `--copts=-DEXTRA_CORD_RING_VALIDATION`.
+  // Verification is VERY expensive, so only do it for debugging purposes.
+  static CordRepRing* Validate(CordRepRing* rep, const char* file = nullptr,
+                               int line = 0);
+
+  // Allocates a CordRepRing large enough to hold `capacity + extra` entries.
+  // The returned capacity may be larger if the allocated memory allows for it.
+  // The maximum capacity of a CordRepRing is capped at kMaxCapacity.
+  // Throws `std::length_error` if `capacity + extra` exceeds kMaxCapacity.
+  static CordRepRing* New(size_t capacity, size_t extra);
+
+  // Deallocates (but does not destroy) the provided ring buffer.
+  static void Delete(CordRepRing* rep);
+
+  // Destroys the provided ring buffer, decrementing the reference count of all
+  // contained child CordReps. The provided `rep` should have a ref count of
+  // one (pre-decrement destroy call observing `refcount.IsOne()`) or zero
+  // (post-decrement destroy call observing `!refcount.Decrement()`).
+  static void Destroy(CordRepRing* rep);
+
+  // Returns a mutable pointer to the logical end position array.
+  pos_type* entry_end_pos() {
+    return Layout::Partial().Pointer<0>(data_);
+  }
+
+  // Returns a mutable pointer to the child pointer array.
+  CordRep** entry_child() {
+    return Layout::Partial(capacity()).Pointer<1>(data_);
+  }
+
+  // Returns a mutable pointer to the data offset array.
+  offset_type* entry_data_offset() {
+    return Layout::Partial(capacity(), capacity()).Pointer<2>(data_);
+  }
+
+  // Find implementations for cases not covered by the 0 / length fast paths.
+  Position FindSlow(index_type head, size_t offset) const;
+  Position FindTailSlow(index_type head, size_t offset) const;
+
+  // Finds the index of the first node that is within a reasonable distance
+  // of the node at `offset`, from which we can continue with a linear search.
+  template <bool wrap>
+  index_type FindBinary(index_type head, index_type tail, size_t offset) const;
+
+  // Fills the current (initialized) instance from the provided source, copying
+  // entries [head, tail). Adds a reference to copied entries if `ref` is true.
+  template <bool ref>
+  void Fill(const CordRepRing* src, index_type head, index_type tail);
+
+  // Creates a copy of `rep`, copying all entries [head, tail), allocating room
+  // for `extra` entries. Adds a reference on all copied entries.
+  static CordRepRing* Copy(CordRepRing* rep, index_type head, index_type tail,
+                           size_t extra = 0);
+
+  // Returns a mutable CordRepRing from `rep` with room for at least
+  // `extra` additional nodes. Adopts a reference count from `rep`.
+  // This function will return `rep` if, and only if:
+  // - rep.entries + extra <= rep.capacity
+  // - rep.refcount == 1
+  // Otherwise, this function will create a new copy of `rep` with additional
+  // capacity to satisfy `extra` extra nodes, and unref the old `rep` instance.
+  //
+  // If a new CordRepRing cannot be allocated, or the new capacity would exceed
+  // the maximum capacity, then the input is still consumed and an exception is
+  // thrown.
+  static CordRepRing* Mutable(CordRepRing* rep, size_t extra);
+
+  // Slow path for Append(CordRepRing* rep, CordRep* child). This function is
+  // exercised if the provided `child` in Append() is not a leaf node, i.e., a
+  // ring buffer or old (concat) cord tree.
+  static CordRepRing* AppendSlow(CordRepRing* rep, CordRep* child);
+
+  // Appends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
+  static CordRepRing* AppendLeaf(CordRepRing* rep, CordRep* child,
+                                 size_t offset, size_t length);
+
+  // Prepends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
+  static CordRepRing* PrependLeaf(CordRepRing* rep, CordRep* child,
+                                  size_t offset, size_t length);
+
+  // Slow path for Prepend(CordRepRing* rep, CordRep* child). This function is
+  // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a
+  // ring buffer or old (concat) cord tree.
+  static CordRepRing* PrependSlow(CordRepRing* rep, CordRep* child);
+
+  // Slow path for Create(CordRep* child, size_t extra). This function is
+  // exercised if the provided `child` in Create() is not a leaf node, i.e., a
+  // ring buffer or old (concat) cord tree.
+  static CordRepRing* CreateSlow(CordRep* child, size_t extra);
+
+  // Creates a new ring buffer from the provided `child` leaf node. Requires
+  // `child` to be FLAT or EXTERNAL.
+  // The returned ring buffer has a capacity of at least `1 + extra`.
+  static CordRepRing* CreateFromLeaf(CordRep* child, size_t offset,
+                                     size_t length, size_t extra);
+
+  // Appends or prepends (depending on AddMode) the ring buffer in `ring` to
+  // `rep` starting at `offset` with length `len`.
+  template <AddMode mode>
+  static CordRepRing* AddRing(CordRepRing* rep, CordRepRing* ring,
+                              size_t offset, size_t len);
+
+  // Increases the data offset for entry `index` by `n`.
+  void AddDataOffset(index_type index, size_t n);
+
+  // Decreases the length for entry `index` by `n`.
+  void SubLength(index_type index, size_t n);
+
+  index_type head_;
+  index_type tail_;
+  index_type capacity_;
+  pos_type begin_pos_;
+
+  alignas(kLayoutAlignment) char data_[kLayoutAlignment];
+
+  friend struct CordRep;
+};
+
+constexpr size_t CordRepRing::AllocSize(size_t capacity) {
+  return sizeof(CordRepRing) - sizeof(data_) +
+         Layout(capacity, capacity, capacity).AllocSize();
+}
+
+inline constexpr size_t CordRepRing::Distance(pos_type pos, pos_type end_pos) {
+  return (end_pos - pos);
+}
+
+inline const char* CordRepRing::GetLeafData(const CordRep* rep) {
+  return rep->tag != EXTERNAL ? rep->flat()->Data() : rep->external()->base;
+}
+
+inline const char* CordRepRing::GetRepData(const CordRep* rep) {
+  if (rep->tag >= FLAT) return rep->flat()->Data();
+  if (rep->tag == EXTERNAL) return rep->external()->base;
+  return GetLeafData(rep->substring()->child) + rep->substring()->start;
+}
+
+inline CordRepRing::index_type CordRepRing::advance(index_type index) const {
+  assert(index < capacity_);
+  return ++index == capacity_ ? 0 : index;
+}
+
+inline CordRepRing::index_type CordRepRing::advance(index_type index,
+                                                    index_type n) const {
+  assert(index < capacity_ && n <= capacity_);
+  return (index += n) >= capacity_ ? index - capacity_ : index;
+}
+
+inline CordRepRing::index_type CordRepRing::retreat(index_type index) const {
+  assert(index < capacity_);
+  return (index > 0 ? index : capacity_) - 1;
+}
+
+inline CordRepRing::index_type CordRepRing::retreat(index_type index,
+                                                    index_type n) const {
+  assert(index < capacity_ && n <= capacity_);
+  return index >= n ? index - n : capacity_ - n + index;
+}
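+
+// For illustration, assuming capacity_ == 8:
+//   advance(7)     == 0    // wraps to the start of the entry array
+//   advance(6, 5)  == 3    // (6 + 5) - 8
+//   retreat(0)     == 7    // wraps to the end of the entry array
+//   retreat(2, 5)  == 5    // 8 - 5 + 2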
+
+inline absl::string_view CordRepRing::entry_data(index_type index) const {
+  size_t data_offset = entry_data_offset(index);
+  return {GetRepData(entry_child(index)) + data_offset, entry_length(index)};
+}
+
+inline bool CordRepRing::IsValidIndex(index_type index) const {
+  if (index >= capacity_) return false;
+  return (tail_ > head_) ? (index >= head_ && index < tail_)
+                         : (index >= head_ || index < tail_);
+}
+
+#ifndef EXTRA_CORD_RING_VALIDATION
+inline CordRepRing* CordRepRing::Validate(CordRepRing* rep,
+                                          const char* /*file*/, int /*line*/) {
+  return rep;
+}
+#endif
+
+inline CordRepRing::Position CordRepRing::Find(size_t offset) const {
+  assert(offset < length);
+  return (offset == 0) ? Position{head_, 0} : FindSlow(head_, offset);
+}
+
+inline CordRepRing::Position CordRepRing::Find(index_type head,
+                                               size_t offset) const {
+  assert(offset < length);
+  assert(IsValidIndex(head) && offset >= entry_start_offset(head));
+  return (offset == 0) ? Position{head_, 0} : FindSlow(head, offset);
+}
+
+inline CordRepRing::Position CordRepRing::FindTail(size_t offset) const {
+  assert(offset > 0 && offset <= length);
+  return (offset == length) ? Position{tail_, 0} : FindTailSlow(head_, offset);
+}
+
+inline CordRepRing::Position CordRepRing::FindTail(index_type head,
+                                                   size_t offset) const {
+  assert(offset > 0 && offset <= length);
+  assert(IsValidIndex(head) && offset >= entry_start_offset(head) + 1);
+  return (offset == length) ? Position{tail_, 0} : FindTailSlow(head, offset);
+}
+
+// Now that CordRepRing is defined, we can define CordRep's helper casts:
+inline CordRepRing* CordRep::ring() {
+  assert(IsRing());
+  return static_cast<CordRepRing*>(this);
+}
+
+inline const CordRepRing* CordRep::ring() const {
+  assert(IsRing());
+  return static_cast<const CordRepRing*>(this);
+}
+
+inline bool CordRepRing::IsFlat(absl::string_view* fragment) const {
+  if (entries() == 1) {
+    if (fragment) *fragment = entry_data(head());
+    return true;
+  }
+  return false;
+}
+
+inline bool CordRepRing::IsFlat(size_t offset, size_t len,
+                                absl::string_view* fragment) const {
+  const Position pos = Find(offset);
+  const absl::string_view data = entry_data(pos.index);
+  if (data.length() >= len && data.length() - len >= pos.offset) {
+    if (fragment) *fragment = data.substr(pos.offset, len);
+    return true;
+  }
+  return false;
+}
+
+std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h b/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h
new file mode 100644
index 0000000..7ceeaa0
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h
@@ -0,0 +1,118 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepRingReader provides basic navigation over CordRepRing data.
+class CordRepRingReader {
+ public:
+  // Returns true if this instance is not empty.
+  explicit operator bool() const { return ring_ != nullptr; }
+
+  // Returns the ring buffer reference for this instance, or nullptr if empty.
+  CordRepRing* ring() const { return ring_; }
+
+  // Returns the current node index inside the ring buffer for this instance.
+  // The returned value is undefined if this instance is empty.
+  CordRepRing::index_type index() const { return index_; }
+
+  // Returns the current node inside the ring buffer for this instance.
+  // The returned value is undefined if this instance is empty.
+  CordRep* node() const { return ring_->entry_child(index_); }
+
+  // Returns the length of the referenced ring buffer.
+  // Requires the current instance to be non-empty.
+  size_t length() const {
+    assert(ring_);
+    return ring_->length;
+  }
+
+  // Returns the end offset of the last navigated-to chunk, which represents the
+  // total bytes 'consumed' relative to the start of the ring. The returned
+  // value is never zero. For example, initializing a reader with a ring buffer
+  // with a first chunk of 19 bytes will return consumed() = 19.
+  // Requires the current instance to be non-empty.
+  size_t consumed() const {
+    assert(ring_);
+    return ring_->entry_end_offset(index_);
+  }
+
+  // Returns the number of bytes remaining beyond the last navigated-to chunk.
+  // Requires the current instance to be non-empty.
+  size_t remaining() const {
+    assert(ring_);
+    return length() - consumed();
+  }
+
+  // Resets this instance to an empty value
+  void Reset() { ring_ = nullptr; }
+
+  // Resets this instance to the start of `ring`. `ring` must not be null.
+  // Returns a reference into the first chunk of the provided ring.
+  absl::string_view Reset(CordRepRing* ring) {
+    assert(ring);
+    ring_ = ring;
+    index_ = ring_->head();
+    return ring_->entry_data(index_);
+  }
+
+  // Navigates to the next chunk inside the referenced ring buffer.
+  // Returns a reference into the navigated-to chunk.
+  // Requires remaining() to be non-zero.
+  absl::string_view Next() {
+    assert(remaining());
+    index_ = ring_->advance(index_);
+    return ring_->entry_data(index_);
+  }
+
+  // Navigates to the chunk at offset `offset`.
+  // Returns a reference into the navigated-to chunk, adjusted for the relative
+  // position of `offset` into that chunk. For example, calling Seek(13) on a
+  // ring buffer containing 2 chunks of 10 and 20 bytes respectively will return
+  // a string view into the second chunk starting at offset 3 with a size of 17.
+  // Requires `offset` to be less than `length()`
+  absl::string_view Seek(size_t offset) {
+    assert(offset < length());
+    size_t current = ring_->entry_end_offset(index_);
+    CordRepRing::index_type hint = (offset >= current) ? index_ : ring_->head();
+    const CordRepRing::Position head = ring_->Find(hint, offset);
+    index_ = head.index;
+    auto data = ring_->entry_data(head.index);
+    data.remove_prefix(head.offset);
+    return data;
+  }
+
+ private:
+  CordRepRing* ring_ = nullptr;
+  CordRepRing::index_type index_;
+};
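+
+// For illustration, a sketch of iterating all chunks of a non-null `ring`
+// (`Process` is a hypothetical consumer, not part of this header):
+//   CordRepRingReader reader;
+//   absl::string_view chunk = reader.Reset(ring);
+//   while (true) {
+//     Process(chunk);
+//     if (reader.remaining() == 0) break;
+//     chunk = reader.Next();
+//   }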
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
diff --git a/abseil-cpp/absl/strings/internal/cord_rep_test_util.h b/abseil-cpp/absl/strings/internal/cord_rep_test_util.h
new file mode 100644
index 0000000..18a0a19
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cord_rep_test_util.h
@@ -0,0 +1,205 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_
+
+#include <cassert>
+#include <memory>
+#include <random>
+#include <string>
+#include <vector>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cordrep_testing {
+
+inline cord_internal::CordRepSubstring* MakeSubstring(
+    size_t start, size_t len, cord_internal::CordRep* rep) {
+  auto* sub = new cord_internal::CordRepSubstring;
+  sub->tag = cord_internal::SUBSTRING;
+  sub->start = start;
+  sub->length = len <= 0 ? rep->length - start + len : len;
+  sub->child = rep;
+  return sub;
+}
+
+inline cord_internal::CordRepFlat* MakeFlat(absl::string_view value) {
+  assert(value.length() <= cord_internal::kMaxFlatLength);
+  auto* flat = cord_internal::CordRepFlat::New(value.length());
+  flat->length = value.length();
+  memcpy(flat->Data(), value.data(), value.length());
+  return flat;
+}
+
+// Creates an external node for testing
+inline cord_internal::CordRepExternal* MakeExternal(absl::string_view s) {
+  struct Rep : public cord_internal::CordRepExternal {
+    std::string s;
+    explicit Rep(absl::string_view sv) : s(sv) {
+      this->tag = cord_internal::EXTERNAL;
+      this->base = s.data();
+      this->length = s.length();
+      this->releaser_invoker = [](cord_internal::CordRepExternal* self) {
+        delete static_cast<Rep*>(self);
+      };
+    }
+  };
+  return new Rep(s);
+}
+
+inline std::string CreateRandomString(size_t n) {
+  absl::string_view data =
+      "abcdefghijklmnopqrstuvwxyz"
+      "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+      "0123456789~!@#$%^&*()_+=-<>?:\"{}[]|";
+  std::minstd_rand rnd;
+  std::uniform_int_distribution<size_t> dist(0, data.size() - 1);
+  std::string s(n, ' ');
+  for (size_t i = 0; i < n; ++i) {
+    s[i] = data[dist(rnd)];
+  }
+  return s;
+}
+
+// Creates an array of flats from the provided string, chopping it up into
+// flats of `chunk_size` characters each, resulting in roughly
+// `data.size() / chunk_size` total flats.
+inline std::vector<cord_internal::CordRep*> CreateFlatsFromString(
+    absl::string_view data, size_t chunk_size) {
+  assert(chunk_size > 0);
+  std::vector<cord_internal::CordRep*> flats;
+  for (absl::string_view s = data; !s.empty(); s.remove_prefix(chunk_size)) {
+    flats.push_back(MakeFlat(s.substr(0, chunk_size)));
+  }
+  return flats;
+}
+
+inline cord_internal::CordRepBtree* CordRepBtreeFromFlats(
+    absl::Span<cord_internal::CordRep* const> flats) {
+  assert(!flats.empty());
+  auto* node = cord_internal::CordRepBtree::Create(flats[0]);
+  for (size_t i = 1; i < flats.size(); ++i) {
+    node = cord_internal::CordRepBtree::Append(node, flats[i]);
+  }
+  return node;
+}
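+
+// For illustration, a hypothetical test could combine the helpers above as:
+//   std::string data = CreateRandomString(400);
+//   std::vector<cord_internal::CordRep*> flats =
+//       CreateFlatsFromString(data, 100);
+//   cord_internal::CordRepBtree* tree = CordRepBtreeFromFlats(flats);
+//   // `tree` now owns the four flats; CordToString(tree) == data (see below).
+//   cord_internal::CordRep::Unref(tree);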
+
+template <typename Fn>
+inline void CordVisitReps(cord_internal::CordRep* rep, Fn&& fn) {
+  fn(rep);
+  while (rep->tag == cord_internal::SUBSTRING) {
+    rep = rep->substring()->child;
+    fn(rep);
+  }
+  if (rep->tag == cord_internal::BTREE) {
+    for (cord_internal::CordRep* edge : rep->btree()->Edges()) {
+      CordVisitReps(edge, fn);
+    }
+  }
+}
+
+template <typename Predicate>
+inline std::vector<cord_internal::CordRep*> CordCollectRepsIf(
+    Predicate&& predicate, cord_internal::CordRep* rep) {
+  std::vector<cord_internal::CordRep*> reps;
+  CordVisitReps(rep, [&reps, &predicate](cord_internal::CordRep* rep) {
+    if (predicate(rep)) reps.push_back(rep);
+  });
+  return reps;
+}
+
+inline std::vector<cord_internal::CordRep*> CordCollectReps(
+    cord_internal::CordRep* rep) {
+  std::vector<cord_internal::CordRep*> reps;
+  auto fn = [&reps](cord_internal::CordRep* rep) { reps.push_back(rep); };
+  CordVisitReps(rep, fn);
+  return reps;
+}
+
+inline void CordToString(cord_internal::CordRep* rep, std::string& s) {
+  size_t offset = 0;
+  size_t length = rep->length;
+  while (rep->tag == cord_internal::SUBSTRING) {
+    offset += rep->substring()->start;
+    rep = rep->substring()->child;
+  }
+  if (rep->tag == cord_internal::BTREE) {
+    for (cord_internal::CordRep* edge : rep->btree()->Edges()) {
+      CordToString(edge, s);
+    }
+  } else if (rep->tag >= cord_internal::FLAT) {
+    s.append(rep->flat()->Data() + offset, length);
+  } else if (rep->tag == cord_internal::EXTERNAL) {
+    s.append(rep->external()->base + offset, length);
+  } else {
+    ABSL_RAW_LOG(FATAL, "Unsupported tag %d", rep->tag);
+  }
+}
+
+inline std::string CordToString(cord_internal::CordRep* rep) {
+  std::string s;
+  s.reserve(rep->length);
+  CordToString(rep, s);
+  return s;
+}
+
+// RAII helper class that automatically unrefs reps on destruction.
+class AutoUnref {
+ public:
+  ~AutoUnref() {
+    for (CordRep* rep : unrefs_) CordRep::Unref(rep);
+  }
+
+  // Adds `rep` to the list of reps to be unreffed at destruction.
+  template <typename CordRepType>
+  CordRepType* Add(CordRepType* rep) {
+    unrefs_.push_back(rep);
+    return rep;
+  }
+
+  // Increments the reference count of `rep` by one, and adds it to
+  // the list of reps to be unreffed at destruction.
+  template <typename CordRepType>
+  CordRepType* Ref(CordRepType* rep) {
+    unrefs_.push_back(CordRep::Ref(rep));
+    return rep;
+  }
+
+  // Increments the reference count of `rep` by one if `condition` is true,
+  // and adds it to the list of reps to be unreffed at destruction.
+  template <typename CordRepType>
+  CordRepType* RefIf(bool condition, CordRepType* rep) {
+    if (condition) unrefs_.push_back(CordRep::Ref(rep));
+    return rep;
+  }
+
+ private:
+  using CordRep = absl::cord_internal::CordRep;
+
+  std::vector<CordRep*> unrefs_;
+};
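+
+// For illustration, a hypothetical test could use AutoUnref as:
+//   AutoUnref unref;
+//   cord_internal::CordRepFlat* flat = unref.Add(MakeFlat("abc"));
+//   // ... exercise `flat` ...
+//   // `flat` is unreffed automatically when `unref` goes out of scope.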
+
+}  // namespace cordrep_testing
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_
diff --git a/abseil-cpp/absl/strings/internal/cordz_functions.cc b/abseil-cpp/absl/strings/internal/cordz_functions.cc
new file mode 100644
index 0000000..20d314f
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_functions.cc
@@ -0,0 +1,96 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_functions.h"
+
+#include <atomic>
+#include <cmath>
+#include <limits>
+#include <random>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/profiling/internal/exponential_biased.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+// The average interval until the next sample. A value of 0 disables profiling
+// while a value of 1 will profile all Cords.
+std::atomic<int> g_cordz_mean_interval(50000);
+
+}  // namespace
+
+#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+
+// Special negative 'not initialized' per thread value for cordz_next_sample.
+static constexpr int64_t kInitCordzNextSample = -1;
+
+ABSL_CONST_INIT thread_local int64_t cordz_next_sample = kInitCordzNextSample;
+
+// kIntervalIfDisabled is the number of profile-eligible events that need to
+// occur before the code will check again whether cordz is still disabled.
+constexpr int64_t kIntervalIfDisabled = 1 << 16;
+
+ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() {
+
+  thread_local absl::profiling_internal::ExponentialBiased
+      exponential_biased_generator;
+  int32_t mean_interval = get_cordz_mean_interval();
+
+  // Check if we disabled profiling. If so, set the next sample to a "large"
+  // number to minimize the overhead of the should_profile codepath.
+  if (mean_interval <= 0) {
+    cordz_next_sample = kIntervalIfDisabled;
+    return false;
+  }
+
+  // Check if we're always sampling.
+  if (mean_interval == 1) {
+    cordz_next_sample = 1;
+    return true;
+  }
+
+  if (cordz_next_sample <= 0) {
+    // If this is the first check on the current thread, check
+    // cordz_should_profile() again using the newly created (initial) stride
+    // stored in cordz_next_sample.
+    const bool initialized = cordz_next_sample != kInitCordzNextSample;
+    cordz_next_sample = exponential_biased_generator.GetStride(mean_interval);
+    return initialized || cordz_should_profile();
+  }
+
+  --cordz_next_sample;
+  return false;
+}
+
+void cordz_set_next_sample_for_testing(int64_t next_sample) {
+  cordz_next_sample = next_sample;
+}
+
+#endif  // ABSL_INTERNAL_CORDZ_ENABLED
+
+int32_t get_cordz_mean_interval() {
+  return g_cordz_mean_interval.load(std::memory_order_acquire);
+}
+
+void set_cordz_mean_interval(int32_t mean_interval) {
+  g_cordz_mean_interval.store(mean_interval, std::memory_order_release);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_functions.h b/abseil-cpp/absl/strings/internal/cordz_functions.h
new file mode 100644
index 0000000..ed108bf
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_functions.h
@@ -0,0 +1,77 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_
+
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns the current sample rate. This represents the average interval
+// between samples.
+int32_t get_cordz_mean_interval();
+
+// Sets the sample rate with the average interval between samples.
+void set_cordz_mean_interval(int32_t mean_interval);
+
+// Cordz is only enabled on Linux with thread_local support.
+#if defined(ABSL_INTERNAL_CORDZ_ENABLED)
+#error ABSL_INTERNAL_CORDZ_ENABLED cannot be set directly
+#elif defined(__linux__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_INTERNAL_CORDZ_ENABLED 1
+#endif
+
+#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+
+// cordz_next_sample is the number of events until the next sample event. If
+// the value is 1 or less, the code will check on the next event if cordz is
+// enabled, and if so, will sample the Cord. cordz is only enabled when we can
+// use thread locals.
+ABSL_CONST_INIT extern thread_local int64_t cordz_next_sample;
+
+// Determines if the next sample should be profiled. If so, `cordz_next_sample`
+// is set to the interval until the next sample.
+bool cordz_should_profile_slow();
+
+// Returns true if the next cord should be sampled.
+inline bool cordz_should_profile() {
+  if (ABSL_PREDICT_TRUE(cordz_next_sample > 1)) {
+    cordz_next_sample--;
+    return false;
+  }
+  return cordz_should_profile_slow();
+}
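+
+// For illustration only (a hypothetical call site; the actual integration
+// lives in the Cord sampling code):
+//   if (ABSL_PREDICT_FALSE(cordz_should_profile())) {
+//     // Attach sampling/tracking state to the new cord.
+//   }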
+
+// Sets the interval until the next sample (for testing only)
+void cordz_set_next_sample_for_testing(int64_t next_sample);
+
+#else  // ABSL_INTERNAL_CORDZ_ENABLED
+
+inline bool cordz_should_profile() { return false; }
+inline void cordz_set_next_sample_for_testing(int64_t) {}
+
+#endif  // ABSL_INTERNAL_CORDZ_ENABLED
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_
diff --git a/abseil-cpp/absl/strings/internal/cordz_functions_test.cc b/abseil-cpp/absl/strings/internal/cordz_functions_test.cc
new file mode 100644
index 0000000..b70a685
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_functions_test.cc
@@ -0,0 +1,149 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_functions.h"
+
+#include <thread>  // NOLINT we need real clean new threads
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::Eq;
+using ::testing::Ge;
+using ::testing::Le;
+
+TEST(CordzFunctionsTest, SampleRate) {
+  int32_t orig_sample_rate = get_cordz_mean_interval();
+  int32_t expected_sample_rate = 123;
+  set_cordz_mean_interval(expected_sample_rate);
+  EXPECT_THAT(get_cordz_mean_interval(), Eq(expected_sample_rate));
+  set_cordz_mean_interval(orig_sample_rate);
+}
+
+// Cordz is disabled when we don't have thread_local. All calls to
+// should_profile will return false when cordz is disabled, so we skip the
+// sampling tests in that configuration.
+#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+
+TEST(CordzFunctionsTest, ShouldProfileDisable) {
+  int32_t orig_sample_rate = get_cordz_mean_interval();
+
+  set_cordz_mean_interval(0);
+  cordz_set_next_sample_for_testing(0);
+  EXPECT_FALSE(cordz_should_profile());
+  // 1 << 16 is from kIntervalIfDisabled in cordz_functions.cc.
+  EXPECT_THAT(cordz_next_sample, Eq(1 << 16));
+
+  set_cordz_mean_interval(orig_sample_rate);
+}
+
+TEST(CordzFunctionsTest, ShouldProfileAlways) {
+  int32_t orig_sample_rate = get_cordz_mean_interval();
+
+  set_cordz_mean_interval(1);
+  cordz_set_next_sample_for_testing(1);
+  EXPECT_TRUE(cordz_should_profile());
+  EXPECT_THAT(cordz_next_sample, Le(1));
+
+  set_cordz_mean_interval(orig_sample_rate);
+}
+
+TEST(CordzFunctionsTest, DoesNotAlwaysSampleFirstCord) {
+  // Set a large enough interval such that the chance of many threads randomly
+  // sampling the first call is vanishingly small.
+  set_cordz_mean_interval(10000);
+  int tries = 0;
+  bool sampled = false;
+  do {
+    ++tries;
+    ASSERT_THAT(tries, Le(1000));
+    std::thread thread([&sampled] {
+      sampled = cordz_should_profile();
+    });
+    thread.join();
+  } while (sampled);
+}
+
+TEST(CordzFunctionsTest, ShouldProfileRate) {
+  static constexpr int kDesiredMeanInterval = 1000;
+  static constexpr int kSamples = 10000;
+  int32_t orig_sample_rate = get_cordz_mean_interval();
+
+  set_cordz_mean_interval(kDesiredMeanInterval);
+
+  int64_t sum_of_intervals = 0;
+  for (int i = 0; i < kSamples; i++) {
+    // Setting next_sample to 0 will force cordz_should_profile to generate a
+    // new value for next_sample each iteration.
+    cordz_set_next_sample_for_testing(0);
+    cordz_should_profile();
+    sum_of_intervals += cordz_next_sample;
+  }
+
+  // The sum of independent exponential variables is an Erlang distribution,
+  // which is a gamma distribution where the shape parameter is equal to the
+  // number of summands. The distribution used for cordz_should_profile is
+  // actually floor(Exponential(1/mean)) which introduces bias. However, we can
+  // apply the squint-really-hard correction factor. That is, when mean is
+  // large, then if we squint really hard the shape of the distribution between
+  // N and N+1 looks like a uniform distribution. On average, each value for
+  // next_sample will be about 0.5 lower than we would expect from an
+  // exponential distribution. This squint-really-hard correction approach won't
+  // work when mean is smaller than about 10 but works fine when mean is 1000.
+  //
+  // We can use R to calculate a confidence interval. This
+  // shows how to generate a confidence interval with a false positive rate of
+  // one in a billion.
+  //
+  // $ R -q
+  // > mean = 1000
+  // > kSamples = 10000
+  // > errorRate = 1e-9
+  // > correction = -kSamples / 2
+  // > low = qgamma(errorRate/2, kSamples, 1/mean) + correction
+  // > high = qgamma(1 - errorRate/2, kSamples, 1/mean) + correction
+  // > low
+  // [1] 9396115
+  // > high
+  // [1] 10618100
+  EXPECT_THAT(sum_of_intervals, Ge(9396115));
+  EXPECT_THAT(sum_of_intervals, Le(10618100));
+
+  set_cordz_mean_interval(orig_sample_rate);
+}
+
+#else  // ABSL_INTERNAL_CORDZ_ENABLED
+
+TEST(CordzFunctionsTest, ShouldProfileDisabled) {
+  int32_t orig_sample_rate = get_cordz_mean_interval();
+
+  set_cordz_mean_interval(1);
+  cordz_set_next_sample_for_testing(0);
+  EXPECT_FALSE(cordz_should_profile());
+
+  set_cordz_mean_interval(orig_sample_rate);
+}
+
+#endif  // ABSL_INTERNAL_CORDZ_ENABLED
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_handle.cc b/abseil-cpp/absl/strings/internal/cordz_handle.cc
new file mode 100644
index 0000000..a7061db
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_handle.cc
@@ -0,0 +1,165 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/strings/internal/cordz_handle.h"
+
+#include <atomic>
+
+#include "absl/base/internal/raw_logging.h"  // For ABSL_RAW_CHECK
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+struct Queue {
+  Queue() = default;
+
+  absl::Mutex mutex;
+  std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
+
+  // Returns true if this delete queue is empty. This method does not acquire
+  // the lock, but does a 'load acquire' observation on the delete queue tail.
+  // It is used inside Delete() to check for the presence of a delete queue
+  // without holding the lock. The assumption is that the caller is in the
+  // state of 'being deleted', and cannot be newly discovered by a concurrent
+  // 'being constructed' snapshot instance. Practically, this means that any
+  // such discovery ('find', 'first', 'next', etc.) must have proper 'happens
+  // before / after' semantics and atomic fences.
+  bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    return dq_tail.load(std::memory_order_acquire) == nullptr;
+  }
+};
+
+static Queue* GlobalQueue() {
+  static Queue* global_queue = new Queue;
+  return global_queue;
+}
+
+}  // namespace
+
+CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
+  Queue* global_queue = GlobalQueue();
+  if (is_snapshot) {
+    MutexLock lock(&global_queue->mutex);
+    CordzHandle* dq_tail =
+        global_queue->dq_tail.load(std::memory_order_acquire);
+    if (dq_tail != nullptr) {
+      dq_prev_ = dq_tail;
+      dq_tail->dq_next_ = this;
+    }
+    global_queue->dq_tail.store(this, std::memory_order_release);
+  }
+}
+
+CordzHandle::~CordzHandle() {
+  Queue* global_queue = GlobalQueue();
+  if (is_snapshot_) {
+    std::vector<CordzHandle*> to_delete;
+    {
+      MutexLock lock(&global_queue->mutex);
+      CordzHandle* next = dq_next_;
+      if (dq_prev_ == nullptr) {
+        // We were head of the queue, delete every CordzHandle until we reach
+        // either the end of the list, or a snapshot handle.
+        while (next && !next->is_snapshot_) {
+          to_delete.push_back(next);
+          next = next->dq_next_;
+        }
+      } else {
+        // Another CordzHandle existed before this one, don't delete anything.
+        dq_prev_->dq_next_ = next;
+      }
+      if (next) {
+        next->dq_prev_ = dq_prev_;
+      } else {
+        global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
+      }
+    }
+    for (CordzHandle* handle : to_delete) {
+      delete handle;
+    }
+  }
+}
+
+bool CordzHandle::SafeToDelete() const {
+  return is_snapshot_ || GlobalQueue()->IsEmpty();
+}
+
+void CordzHandle::Delete(CordzHandle* handle) {
+  assert(handle);
+  if (handle) {
+    Queue* const queue = GlobalQueue();
+    if (!handle->SafeToDelete()) {
+      MutexLock lock(&queue->mutex);
+      CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
+      if (dq_tail != nullptr) {
+        handle->dq_prev_ = dq_tail;
+        dq_tail->dq_next_ = handle;
+        queue->dq_tail.store(handle, std::memory_order_release);
+        return;
+      }
+    }
+    delete handle;
+  }
+}
+
+std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
+  std::vector<const CordzHandle*> handles;
+  Queue* global_queue = GlobalQueue();
+  MutexLock lock(&global_queue->mutex);
+  CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
+  for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
+    handles.push_back(p);
+  }
+  return handles;
+}
+
+bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
+    const CordzHandle* handle) const {
+  if (!is_snapshot_) return false;
+  if (handle == nullptr) return true;
+  if (handle->is_snapshot_) return false;
+  bool snapshot_found = false;
+  Queue* global_queue = GlobalQueue();
+  MutexLock lock(&global_queue->mutex);
+  for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
+    if (p == handle) return !snapshot_found;
+    if (p == this) snapshot_found = true;
+  }
+  ABSL_ASSERT(snapshot_found);  // Assert that 'this' is in delete queue.
+  return true;
+}
+
+std::vector<const CordzHandle*>
+CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
+  std::vector<const CordzHandle*> handles;
+  if (!is_snapshot()) {
+    return handles;
+  }
+
+  Queue* global_queue = GlobalQueue();
+  MutexLock lock(&global_queue->mutex);
+  for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
+    if (!p->is_snapshot()) {
+      handles.push_back(p);
+    }
+  }
+  return handles;
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_handle.h b/abseil-cpp/absl/strings/internal/cordz_handle.h
new file mode 100644
index 0000000..08e3f0d
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_handle.h
@@ -0,0 +1,98 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_
+
+#include <atomic>
+#include <vector>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// This base class allows multiple types of object (CordzInfo and
+// CordzSampleToken) to exist simultaneously on the delete queue (pointed to by
+// global_dq_tail and traversed using dq_prev_ and dq_next_). The
+// delete queue guarantees that once a profiler creates a CordzSampleToken and
+// has gained visibility into a CordzInfo object, that CordzInfo object will not
+// be deleted prematurely. This allows the profiler to inspect all CordzInfo
+// objects that are alive without needing to hold a global lock.
+class ABSL_DLL CordzHandle {
+ public:
+  CordzHandle() : CordzHandle(false) {}
+
+  bool is_snapshot() const { return is_snapshot_; }
+
+  // Returns true if this instance is safe to be deleted because it is either a
+  // snapshot, which is always safe to delete, or not included in the global
+  // delete queue and thus not included in any snapshot.
+  // Callers are responsible for making sure this instance can not be newly
+  // discovered by other threads. For example, CordzInfo instances first de-list
+  // themselves from the global CordzInfo list before determining if they are
+  // safe to be deleted directly.
+  // If SafeToDelete returns false, callers MUST use the Delete() method to
+  // safely queue CordzHandle instances for deletion.
+  bool SafeToDelete() const;
+
+  // Deletes the provided instance, or puts it on the delete queue to be deleted
+  // once there are no more sample tokens (snapshot) instances potentially
+  // referencing the instance. `handle` should not be null.
+  static void Delete(CordzHandle* handle);
+
+  // Returns the current entries in the delete queue in LIFO order.
+  static std::vector<const CordzHandle*> DiagnosticsGetDeleteQueue();
+
+  // Returns true if the provided handle is nullptr or guarded by this handle.
+  // Since the CordzSnapshot token is itself a CordzHandle, this method will
+  // allow tests to check if that token is keeping an arbitrary CordzHandle
+  // alive.
+  bool DiagnosticsHandleIsSafeToInspect(const CordzHandle* handle) const;
+
+  // Returns the current entries in the delete queue, in LIFO order, that are
+  // protected by this. CordzHandle objects are only placed on the delete queue
+  // after CordzHandle::Delete is called with them as an argument. Only
+  // CordzHandle objects that are not also CordzSnapshot objects will be
+  // included in the return vector. For each of the handles in the return
+  // vector, the earliest that their memory can be freed is when this
+  // CordzSnapshot object is deleted.
+  std::vector<const CordzHandle*> DiagnosticsGetSafeToInspectDeletedHandles();
+
+ protected:
+  explicit CordzHandle(bool is_snapshot);
+  virtual ~CordzHandle();
+
+ private:
+  const bool is_snapshot_;
+
+  // dq_prev_ and dq_next_ require the global queue mutex to be held.
+  // Unfortunately we can't use thread annotations such that the thread safety
+  // analysis understands that queue_ and global_queue_ are one and the same.
+  CordzHandle* dq_prev_ = nullptr;
+  CordzHandle* dq_next_ = nullptr;
+};
+
+class CordzSnapshot : public CordzHandle {
+ public:
+  CordzSnapshot() : CordzHandle(true) {}
+};
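+
+// For illustration, a hypothetical diagnostics scan guarded by a snapshot:
+//   CordzSnapshot snapshot;
+//   for (const CordzHandle* handle :
+//        snapshot.DiagnosticsGetSafeToInspectDeletedHandles()) {
+//     // `handle` cannot be freed while `snapshot` is alive.
+//   }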
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_
diff --git a/abseil-cpp/absl/strings/internal/cordz_handle_test.cc b/abseil-cpp/absl/strings/internal/cordz_handle_test.cc
new file mode 100644
index 0000000..fd68e06
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_handle_test.cc
@@ -0,0 +1,265 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/strings/internal/cordz_handle.h"
+
+#include <random>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::Gt;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+// Local less verbose helper
+std::vector<const CordzHandle*> DeleteQueue() {
+  return CordzHandle::DiagnosticsGetDeleteQueue();
+}
+
+struct CordzHandleDeleteTracker : public CordzHandle {
+  bool* deleted;
+  explicit CordzHandleDeleteTracker(bool* deleted) : deleted(deleted) {}
+  ~CordzHandleDeleteTracker() override { *deleted = true; }
+};
+
+TEST(CordzHandleTest, DeleteQueueIsEmpty) {
+  EXPECT_THAT(DeleteQueue(), SizeIs(0));
+}
+
+TEST(CordzHandleTest, CordzHandleCreateDelete) {
+  bool deleted = false;
+  auto* handle = new CordzHandleDeleteTracker(&deleted);
+  EXPECT_FALSE(handle->is_snapshot());
+  EXPECT_TRUE(handle->SafeToDelete());
+  EXPECT_THAT(DeleteQueue(), SizeIs(0));
+
+  CordzHandle::Delete(handle);
+  EXPECT_THAT(DeleteQueue(), SizeIs(0));
+  EXPECT_TRUE(deleted);
+}
+
+TEST(CordzHandleTest, CordzSnapshotCreateDelete) {
+  auto* snapshot = new CordzSnapshot();
+  EXPECT_TRUE(snapshot->is_snapshot());
+  EXPECT_TRUE(snapshot->SafeToDelete());
+  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot));
+  delete snapshot;
+  EXPECT_THAT(DeleteQueue(), SizeIs(0));
+}
+
+TEST(CordzHandleTest, CordzHandleCreateDeleteWithSnapshot) {
+  bool deleted = false;
+  auto* snapshot = new CordzSnapshot();
+  auto* handle = new CordzHandleDeleteTracker(&deleted);
+  EXPECT_FALSE(handle->SafeToDelete());
+
+  CordzHandle::Delete(handle);
+  EXPECT_THAT(DeleteQueue(), ElementsAre(handle, snapshot));
+  EXPECT_FALSE(deleted);
+  EXPECT_FALSE(handle->SafeToDelete());
+
+  delete snapshot;
+  EXPECT_THAT(DeleteQueue(), SizeIs(0));
+  EXPECT_TRUE(deleted);
+}
+
+TEST(CordzHandleTest, MultiSnapshot) {
+  bool deleted[3] = {false, false, false};
+
+  CordzSnapshot* snapshot[3];
+  CordzHandleDeleteTracker* handle[3];
+  for (int i = 0; i < 3; ++i) {
+    snapshot[i] = new CordzSnapshot();
+    handle[i] = new CordzHandleDeleteTracker(&deleted[i]);
+    CordzHandle::Delete(handle[i]);
+  }
+
+  EXPECT_THAT(DeleteQueue(), ElementsAre(handle[2], snapshot[2], handle[1],
+                                         snapshot[1], handle[0], snapshot[0]));
+  EXPECT_THAT(deleted, ElementsAre(false, false, false));
+
+  delete snapshot[1];
+  EXPECT_THAT(DeleteQueue(), ElementsAre(handle[2], snapshot[2], handle[1],
+                                         handle[0], snapshot[0]));
+  EXPECT_THAT(deleted, ElementsAre(false, false, false));
+
+  delete snapshot[0];
+  EXPECT_THAT(DeleteQueue(), ElementsAre(handle[2], snapshot[2]));
+  EXPECT_THAT(deleted, ElementsAre(true, true, false));
+
+  delete snapshot[2];
+  EXPECT_THAT(DeleteQueue(), SizeIs(0));
+  EXPECT_THAT(deleted, ElementsAre(true, true, true));
+}
+
+TEST(CordzHandleTest, DiagnosticsHandleIsSafeToInspect) {
+  CordzSnapshot snapshot1;
+  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(nullptr));
+
+  auto* handle1 = new CordzHandle();
+  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
+
+  CordzHandle::Delete(handle1);
+  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
+
+  CordzSnapshot snapshot2;
+  auto* handle2 = new CordzHandle();
+  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
+  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle2));
+  EXPECT_FALSE(snapshot2.DiagnosticsHandleIsSafeToInspect(handle1));
+  EXPECT_TRUE(snapshot2.DiagnosticsHandleIsSafeToInspect(handle2));
+
+  CordzHandle::Delete(handle2);
+  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
+}
+
+TEST(CordzHandleTest, DiagnosticsGetSafeToInspectDeletedHandles) {
+  EXPECT_THAT(DeleteQueue(), IsEmpty());
+
+  auto* handle = new CordzHandle();
+  auto* snapshot1 = new CordzSnapshot();
+
+  // snapshot1 should be able to see handle.
+  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot1));
+  EXPECT_TRUE(snapshot1->DiagnosticsHandleIsSafeToInspect(handle));
+  EXPECT_THAT(snapshot1->DiagnosticsGetSafeToInspectDeletedHandles(),
+              IsEmpty());
+
+  // This handle will be safe to inspect as long as snapshot1 is alive. However,
+  // since only snapshot1 can prove that it's alive, it will be hidden from
+  // snapshot2.
+  CordzHandle::Delete(handle);
+
+  // This snapshot shouldn't be able to see handle because handle was already
+  // sent to Delete.
+  auto* snapshot2 = new CordzSnapshot();
+
+  // DeleteQueue elements are LIFO order.
+  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot2, handle, snapshot1));
+
+  EXPECT_TRUE(snapshot1->DiagnosticsHandleIsSafeToInspect(handle));
+  EXPECT_FALSE(snapshot2->DiagnosticsHandleIsSafeToInspect(handle));
+
+  EXPECT_THAT(snapshot1->DiagnosticsGetSafeToInspectDeletedHandles(),
+              ElementsAre(handle));
+  EXPECT_THAT(snapshot2->DiagnosticsGetSafeToInspectDeletedHandles(),
+              IsEmpty());
+
+  CordzHandle::Delete(snapshot1);
+  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot2));
+
+  CordzHandle::Delete(snapshot2);
+  EXPECT_THAT(DeleteQueue(), IsEmpty());
+}
+
+// Create and delete CordzHandle and CordzSnapshot objects in multiple threads
+// so that tsan has some time to chew on it and look for memory problems.
+TEST(CordzHandleTest, MultiThreaded) {
+  Notification stop;
+  static constexpr int kNumThreads = 4;
+  // Keep the number of handles relatively small so that the test will naturally
+  // transition to an empty delete queue during the test. If there are, say, 100
+  // handles, that will virtually never happen. With 10 handles and around 50k
+  // iterations in each of 4 threads, the delete queue appears to become empty
+  // around 200 times.
+  static constexpr int kNumHandles = 10;
+
+  // Each thread is going to pick a random index and atomically swap its
+  // CordzHandle with one in handles. This way, each thread can avoid
+  // manipulating a CordzHandle that might be operated upon in another thread.
+  std::vector<std::atomic<CordzHandle*>> handles(kNumHandles);
+
+  // Global flag set when any thread obtained some 'safe to inspect' handles.
+  // On some platforms and in OSS tests, some pool threads may be starved,
+  // stalled, or simply get a few unlikely random 'handle' coin tosses, so we
+  // satisfy this test by observing that 'some' thread did something
+  // meaningful, which should minimize the potential for flakes.
+  std::atomic<bool> found_safe_to_inspect(false);
+
+  {
+    absl::synchronization_internal::ThreadPool pool(kNumThreads);
+    for (int i = 0; i < kNumThreads; ++i) {
+      pool.Schedule([&stop, &handles, &found_safe_to_inspect]() {
+        std::minstd_rand gen;
+        std::uniform_int_distribution<int> dist_type(0, 2);
+        std::uniform_int_distribution<int> dist_handle(0, kNumHandles - 1);
+
+        while (!stop.HasBeenNotified()) {
+          CordzHandle* handle;
+          switch (dist_type(gen)) {
+            case 0:
+              handle = new CordzHandle();
+              break;
+            case 1:
+              handle = new CordzSnapshot();
+              break;
+            default:
+              handle = nullptr;
+              break;
+          }
+          CordzHandle* old_handle = handles[dist_handle(gen)].exchange(handle);
+          if (old_handle != nullptr) {
+            std::vector<const CordzHandle*> safe_to_inspect =
+                old_handle->DiagnosticsGetSafeToInspectDeletedHandles();
+            for (const CordzHandle* handle : safe_to_inspect) {
+              // We're in a tight loop, so don't generate too many error
+              // messages.
+              ASSERT_FALSE(handle->is_snapshot());
+            }
+            if (!safe_to_inspect.empty()) {
+              found_safe_to_inspect.store(true);
+            }
+            CordzHandle::Delete(old_handle);
+          }
+        }
+
+        // Have each thread attempt to clean up everything. Some thread will be
+        // the last to reach this cleanup code, and it will be guaranteed to
+        // clean up everything because nothing remains to create new handles.
+        for (auto& h : handles) {
+          if (CordzHandle* handle = h.exchange(nullptr)) {
+            CordzHandle::Delete(handle);
+          }
+        }
+      });
+    }
+
+    // The threads will hammer away.  Give it a little bit of time for tsan to
+    // spot errors.
+    absl::SleepFor(absl::Seconds(3));
+    stop.Notify();
+  }
+
+  // Confirm that the test did *something*. This check will be satisfied as
+  // long as any thread has deleted a CordzSnapshot object and a non-snapshot
+  // CordzHandle was deleted after the CordzSnapshot was created.
+  // See also comments on `found_safe_to_inspect`
+  EXPECT_TRUE(found_safe_to_inspect.load());
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_info.cc b/abseil-cpp/absl/strings/internal/cordz_info.cc
new file mode 100644
index 0000000..515dfaf
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_info.cc
@@ -0,0 +1,422 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_info.h"
+
+#include "absl/base/config.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/internal/cordz_handle.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/clock.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+using ::absl::base_internal::SpinLockHolder;
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordzInfo::kMaxStackDepth;
+#endif
+
+ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit};
+
+namespace {
+
+// CordRepAnalyzer performs the analysis of a cord.
+//
+// It computes absolute node counts and total memory usage, and an 'estimated
+// fair share memory usage' statistic.
+// Conceptually, it divides the 'memory usage' at each location in the 'cord
+// graph' by the cumulative reference count of that location. The cumulative
+// reference count is the factored total of all edges leading into that node.
+//
+// The top level node is treated specially: we assume the current thread
+// (typically called from the CordzHandler) holds a reference purely to
+// perform a safe analysis, and is not part of the application. So we
+// subtract 1 from the reference count of the top node to compute the
+// 'application fair share' excluding the reference of the current thread.
+//
+// An example of fair sharing, and why we multiply reference counts:
+// Assume we have 2 CordReps, both being a Substring referencing a Flat:
+//   CordSubstring A (refcount = 5) --> child Flat C (refcount = 2)
+//   CordSubstring B (refcount = 9) --> child Flat C (refcount = 2)
+//
+// Flat C has 2 incoming edges from the 2 substrings (refcount = 2) and is not
+// referenced directly anywhere else. Translated into a 'fair share', we then
+// attribute 50% of the memory (memory / refcount = 2) to each incoming edge.
+// Rep A has a refcount of 5, so we attribute each incoming edge 1 / 5th of the
+// memory cost below it, i.e.: the fair share of Rep A of the memory used by C
+// is then 'memory C / (refcount C * refcount A) + (memory A / refcount A)'.
+// It is also easy to see how all incoming edges add up to 100%.
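+//
+// A worked instance of the formula above (the sizes are illustrative only,
+// not taken from real reps): if Flat C occupies 100 bytes and Substring A
+// occupies 40 bytes, the fair share attributed to each incoming edge of Rep A
+// is 100 / (2 * 5) + 40 / 5 = 10 + 8 = 18 bytes. The 5 incoming edges of A
+// then account for 5 * 18 = 90 bytes in total: all of A plus half of C.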
+class CordRepAnalyzer {
+ public:
+  // Creates an analyzer instance binding to `statistics`.
+  explicit CordRepAnalyzer(CordzStatistics& statistics)
+      : statistics_(statistics) {}
+
+  // Analyzes the memory statistics and node counts for the provided `rep`, and
+  // adds the results to `statistics`. Note that node counts and memory sizes
+  // are not initialized here; computed values are added to any existing values.
+  void AnalyzeCordRep(const CordRep* rep) {
+    // Process all linear nodes.
+    // As per the class comments, use refcount - 1 on the top level node, as the
+    // top level node is assumed to be referenced only for analysis purposes.
+    size_t refcount = rep->refcount.Get();
+    RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};
+
+    // Process the top level CRC node, if present.
+    if (repref.rep->tag == CRC) {
+      statistics_.node_count++;
+      statistics_.node_counts.crc++;
+      memory_usage_.Add(sizeof(CordRepCrc), repref.refcount);
+      repref = repref.Child(repref.rep->crc()->child);
+    }
+
+    // Process all top level linear nodes (substrings and flats).
+    repref = CountLinearReps(repref, memory_usage_);
+
+    if (repref.rep != nullptr) {
+      if (repref.rep->tag == RING) {
+        AnalyzeRing(repref);
+      } else if (repref.rep->tag == BTREE) {
+        AnalyzeBtree(repref);
+      } else {
+        // We should have either a btree or ring node if not null.
+        assert(false);
+      }
+    }
+
+    // Adds values to output
+    statistics_.estimated_memory_usage += memory_usage_.total;
+    statistics_.estimated_fair_share_memory_usage +=
+        static_cast<size_t>(memory_usage_.fair_share);
+  }
+
+ private:
+  // RepRef identifies a CordRep* inside the Cord tree with its cumulative
+  // refcount including itself. For example, a tree consisting of a substring
+  // with a refcount of 3 and a child flat with a refcount of 4 will have RepRef
+  // refcounts of 3 and 12 respectively.
+  struct RepRef {
+    const CordRep* rep;
+    size_t refcount;
+
+    // Returns a 'child' RepRef which contains the cumulative reference count of
+    // this instance multiplied by the child's reference count.
+    RepRef Child(const CordRep* child) const {
+      return RepRef{child, refcount * child->refcount.Get()};
+    }
+  };
+
+  // Memory usage values
+  struct MemoryUsage {
+    size_t total = 0;
+    double fair_share = 0.0;
+
+    // Adds `size` memory usage to this class, with a cumulative (recursive)
+    // reference count of `refcount`.
+    void Add(size_t size, size_t refcount) {
+      total += size;
+      fair_share += static_cast<double>(size) / refcount;
+    }
+  };
+
+  // Counts a flat of the provided allocated size.
+  void CountFlat(size_t size) {
+    statistics_.node_count++;
+    statistics_.node_counts.flat++;
+    if (size <= 64) {
+      statistics_.node_counts.flat_64++;
+    } else if (size <= 128) {
+      statistics_.node_counts.flat_128++;
+    } else if (size <= 256) {
+      statistics_.node_counts.flat_256++;
+    } else if (size <= 512) {
+      statistics_.node_counts.flat_512++;
+    } else if (size <= 1024) {
+      statistics_.node_counts.flat_1k++;
+    }
+  }
+
+  // Processes 'linear' reps (substring, flat, external) not requiring iteration
+  // or recursion. Returns RepRef{null} if all reps were processed, else returns
+  // the top-most non-linear (btree or ring) cordrep.
+  // Node counts are updated into `statistics_`, memory usage is updated into
+  // `memory_usage`, which typically references `memory_usage_` except for ring
+  // buffers where we count children unrounded.
+  RepRef CountLinearReps(RepRef rep, MemoryUsage& memory_usage) {
+    // Consume all substrings
+    while (rep.rep->tag == SUBSTRING) {
+      statistics_.node_count++;
+      statistics_.node_counts.substring++;
+      memory_usage.Add(sizeof(CordRepSubstring), rep.refcount);
+      rep = rep.Child(rep.rep->substring()->child);
+    }
+
+    // Consume possible FLAT
+    if (rep.rep->tag >= FLAT) {
+      size_t size = rep.rep->flat()->AllocatedSize();
+      CountFlat(size);
+      memory_usage.Add(size, rep.refcount);
+      return RepRef{nullptr, 0};
+    }
+
+    // Consume possible external
+    if (rep.rep->tag == EXTERNAL) {
+      statistics_.node_count++;
+      statistics_.node_counts.external++;
+      size_t size = rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
+      memory_usage.Add(size, rep.refcount);
+      return RepRef{nullptr, 0};
+    }
+
+    return rep;
+  }
+
+  // Analyzes the provided ring.
+  void AnalyzeRing(RepRef rep) {
+    statistics_.node_count++;
+    statistics_.node_counts.ring++;
+    const CordRepRing* ring = rep.rep->ring();
+    memory_usage_.Add(CordRepRing::AllocSize(ring->capacity()), rep.refcount);
+    ring->ForEach([&](CordRepRing::index_type pos) {
+      CountLinearReps(rep.Child(ring->entry_child(pos)), memory_usage_);
+    });
+  }
+
+  // Analyzes the provided btree.
+  void AnalyzeBtree(RepRef rep) {
+    statistics_.node_count++;
+    statistics_.node_counts.btree++;
+    memory_usage_.Add(sizeof(CordRepBtree), rep.refcount);
+    const CordRepBtree* tree = rep.rep->btree();
+    if (tree->height() > 0) {
+      for (CordRep* edge : tree->Edges()) {
+        AnalyzeBtree(rep.Child(edge));
+      }
+    } else {
+      for (CordRep* edge : tree->Edges()) {
+        CountLinearReps(rep.Child(edge), memory_usage_);
+      }
+    }
+  }
+
+  CordzStatistics& statistics_;
+  MemoryUsage memory_usage_;
+};
+
+}  // namespace
+
+CordzInfo* CordzInfo::Head(const CordzSnapshot& snapshot) {
+  ABSL_ASSERT(snapshot.is_snapshot());
+
+  // We can do an 'unsafe' load of 'head', as we are guaranteed that the
+  // instance it points to is kept alive by the provided CordzSnapshot, so we
+  // can simply return the current value using an acquire load.
+  // We do enforce in DEBUG builds that the 'head' value is present in the
+  // delete queue: ODR violations may lead to 'snapshot' and 'global_list_'
+  // being in different libraries / modules.
+  CordzInfo* head = global_list_.head.load(std::memory_order_acquire);
+  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(head));
+  return head;
+}
+
+CordzInfo* CordzInfo::Next(const CordzSnapshot& snapshot) const {
+  ABSL_ASSERT(snapshot.is_snapshot());
+
+  // Similar to the 'Head()' function, we do not need a mutex here.
+  CordzInfo* next = ci_next_.load(std::memory_order_acquire);
+  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(this));
+  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(next));
+  return next;
+}
+
+void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method) {
+  assert(cord.is_tree());
+  assert(!cord.is_profiled());
+  CordzInfo* cordz_info = new CordzInfo(cord.as_tree(), nullptr, method);
+  cord.set_cordz_info(cordz_info);
+  cordz_info->Track();
+}
+
+void CordzInfo::TrackCord(InlineData& cord, const InlineData& src,
+                          MethodIdentifier method) {
+  assert(cord.is_tree());
+  assert(src.is_tree());
+
+  // Unsample the current cord, as it is being replaced with 'src',
+  // so any method history is no longer relevant.
+  CordzInfo* cordz_info = cord.cordz_info();
+  if (cordz_info != nullptr) cordz_info->Untrack();
+
+  // Start new cord sample
+  cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method);
+  cord.set_cordz_info(cordz_info);
+  cordz_info->Track();
+}
+
+void CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
+                                   MethodIdentifier method) {
+  if (src.is_profiled()) {
+    TrackCord(cord, src, method);
+  } else if (cord.is_profiled()) {
+    cord.cordz_info()->Untrack();
+    cord.clear_cordz_info();
+  }
+}
+
+CordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) {
+  if (src == nullptr) return MethodIdentifier::kUnknown;
+  return src->parent_method_ != MethodIdentifier::kUnknown ? src->parent_method_
+                                                           : src->method_;
+}
+
+size_t CordzInfo::FillParentStack(const CordzInfo* src, void** stack) {
+  assert(stack);
+  if (src == nullptr) return 0;
+  if (src->parent_stack_depth_) {
+    memcpy(stack, src->parent_stack_, src->parent_stack_depth_ * sizeof(void*));
+    return src->parent_stack_depth_;
+  }
+  memcpy(stack, src->stack_, src->stack_depth_ * sizeof(void*));
+  return src->stack_depth_;
+}
+
+CordzInfo::CordzInfo(CordRep* rep,
+                     const CordzInfo* src,
+                     MethodIdentifier method)
+    : rep_(rep),
+      stack_depth_(
+          static_cast<size_t>(absl::GetStackTrace(stack_,
+                                                  /*max_depth=*/kMaxStackDepth,
+                                                  /*skip_count=*/1))),
+      parent_stack_depth_(FillParentStack(src, parent_stack_)),
+      method_(method),
+      parent_method_(GetParentMethod(src)),
+      create_time_(absl::Now()) {
+  update_tracker_.LossyAdd(method);
+  if (src) {
+    // Copy parent counters.
+    update_tracker_.LossyAdd(src->update_tracker_);
+  }
+}
+
+CordzInfo::~CordzInfo() {
+  // `rep_` is potentially kept alive if CordzInfo is included
+  // in a collection snapshot (which should be rare).
+  if (ABSL_PREDICT_FALSE(rep_)) {
+    CordRep::Unref(rep_);
+  }
+}
+
+void CordzInfo::Track() {
+  SpinLockHolder l(&list_->mutex);
+
+  CordzInfo* const head = list_->head.load(std::memory_order_acquire);
+  if (head != nullptr) {
+    head->ci_prev_.store(this, std::memory_order_release);
+  }
+  ci_next_.store(head, std::memory_order_release);
+  list_->head.store(this, std::memory_order_release);
+}
+
+void CordzInfo::Untrack() {
+  ODRCheck();
+  {
+    SpinLockHolder l(&list_->mutex);
+
+    CordzInfo* const head = list_->head.load(std::memory_order_acquire);
+    CordzInfo* const next = ci_next_.load(std::memory_order_acquire);
+    CordzInfo* const prev = ci_prev_.load(std::memory_order_acquire);
+
+    if (next) {
+      ABSL_ASSERT(next->ci_prev_.load(std::memory_order_acquire) == this);
+      next->ci_prev_.store(prev, std::memory_order_release);
+    }
+    if (prev) {
+      ABSL_ASSERT(head != this);
+      ABSL_ASSERT(prev->ci_next_.load(std::memory_order_acquire) == this);
+      prev->ci_next_.store(next, std::memory_order_release);
+    } else {
+      ABSL_ASSERT(head == this);
+      list_->head.store(next, std::memory_order_release);
+    }
+  }
+
+  // We can no longer be discovered: perform a fast path check if we are not
+  // listed on any delete queue, so we can directly delete this instance.
+  if (SafeToDelete()) {
+    UnsafeSetCordRep(nullptr);
+    delete this;
+    return;
+  }
+
+  // We are likely part of a snapshot; extend the life of the CordRep.
+  {
+    absl::MutexLock lock(&mutex_);
+    if (rep_) CordRep::Ref(rep_);
+  }
+  CordzHandle::Delete(this);
+}
+
+void CordzInfo::Lock(MethodIdentifier method)
+    ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_) {
+  mutex_.Lock();
+  update_tracker_.LossyAdd(method);
+  assert(rep_);
+}
+
+void CordzInfo::Unlock() ABSL_UNLOCK_FUNCTION(mutex_) {
+  bool tracked = rep_ != nullptr;
+  mutex_.Unlock();
+  if (!tracked) {
+    Untrack();
+  }
+}
+
+absl::Span<void* const> CordzInfo::GetStack() const {
+  return absl::MakeConstSpan(stack_, stack_depth_);
+}
+
+absl::Span<void* const> CordzInfo::GetParentStack() const {
+  return absl::MakeConstSpan(parent_stack_, parent_stack_depth_);
+}
+
+CordzStatistics CordzInfo::GetCordzStatistics() const {
+  CordzStatistics stats;
+  stats.method = method_;
+  stats.parent_method = parent_method_;
+  stats.update_tracker = update_tracker_;
+  if (CordRep* rep = RefCordRep()) {
+    stats.size = rep->length;
+    CordRepAnalyzer analyzer(stats);
+    analyzer.AnalyzeCordRep(rep);
+    CordRep::Unref(rep);
+  }
+  return stats;
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_info.h b/abseil-cpp/absl/strings/internal/cordz_info.h
new file mode 100644
index 0000000..17eaa91
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_info.h
@@ -0,0 +1,298 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_
+
+#include <atomic>
+#include <cstdint>
+#include <functional>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cordz_functions.h"
+#include "absl/strings/internal/cordz_handle.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzInfo tracks a profiled Cord. Each of these objects can be in two places.
+// If a Cord is alive, the CordzInfo will be in the global_cordz_infos map, and
+// can also be retrieved via the linked list starting with
+// global_cordz_infos_head and continued via the cordz_info_next() method. When
+// a Cord has reached the end of its lifespan, the CordzInfo object will be
+// migrated out of the global_cordz_infos list and the global_cordz_infos_map,
+// and will either be deleted or appended to the global_delete_queue. If it is
+// placed on the global_delete_queue, the CordzInfo object will be cleaned in
+// the destructor of a CordzSampleToken object.
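+//
+// Illustrative call sequence (a sketch using only the methods declared below;
+// `method` and `new_rep` are placeholders, not part of this API):
+//
+//   InlineData cord = ...;                           // cord holding a tree
+//   CordzInfo::MaybeTrackCord(cord, method);         // possibly start sampling
+//   ...
+//   if (CordzInfo* info = cord.cordz_info()) {       // on each tree mutation
+//     info->Lock(method);
+//     info->SetCordRep(new_rep);                     // before the old rep is unreffed
+//     info->Unlock();
+//   }
+//   ...
+//   CordzInfo::MaybeUntrackCord(cord.cordz_info());  // before the cord is deleted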
+class ABSL_LOCKABLE CordzInfo : public CordzHandle {
+ public:
+  using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
+
+  // TrackCord creates a CordzInfo instance which tracks important metrics of
+  // a sampled cord, and stores the created CordzInfo instance into `cord`. All
+  // CordzInfo instances are placed in a global list which is used to discover
+  // and snapshot all actively tracked cords. Callers are responsible for
+  // calling Untrack() before the tracked Cord instance is deleted, or to
+  // stop tracking the sampled Cord. Callers are also responsible for guarding
+  // changes to the 'tree' value of a Cord (InlineData.tree) through the Lock()
+  // and Unlock() calls. Any change resulting in a new tree value for the cord
+  // requires a call to SetCordRep() before the old tree has been unreffed
+  // and/or deleted. `method` identifies the Cord public API method initiating
+  // the cord to be sampled.
+  // Requires `cord` to hold a tree, and `cord.cordz_info()` to be null.
+  static void TrackCord(InlineData& cord, MethodIdentifier method);
+
+  // Identical to TrackCord(), except that this function fills the
+  // `parent_stack` and `parent_method` properties of the created CordzInfo
+  // instance from the provided `src` instance if `src` is sampled.
+  // This function should be used for sampling 'copy constructed' and 'copy
+  // assigned' cords. This function allows `cord` to be already sampled, in
+  // which case the CordzInfo will be newly created from `src`.
+  static void TrackCord(InlineData& cord, const InlineData& src,
+                        MethodIdentifier method);
+
+  // Maybe sample the cord identified by 'cord' for method 'method'.
+  // Uses `cordz_should_profile` to randomly pick cords to be sampled, and if
+  // so, invokes `TrackCord` to start sampling `cord`.
+  static void MaybeTrackCord(InlineData& cord, MethodIdentifier method);
+
+  // Maybe sample the cord identified by 'cord' for method 'method'.
+  // `src` identifies a 'parent' cord which is assigned to `cord`, typically the
+  // input cord for a copy constructor, or an assign method such as `operator=`.
+  // `cord` will be sampled if (and only if) `src` is sampled.
+  // If `cord` is currently being sampled and `src` is not being sampled, then
+  // this function will stop sampling the cord and reset the cord's cordz_info.
+  //
+  // Previously this function defined that `cord` will be sampled if either
+  // `src` is sampled, or if `cord` is randomly picked for sampling. However,
+  // this can cause issues, as there may be paths where some cord is assigned an
+  // indirect copy of its own value. Since such a 'string of copies' would then
+  // remain sampled (`src.is_profiled`), assigning such a cord back to
+  // 'itself' creates a cycle where the cord will converge to 'always sampled'.
+  //
+  // For example:
+  //
+  //   Cord x;
+  //   for (...) {
+  //     // Copy ctor --> y.is_profiled := x.is_profiled | random(...)
+  //     Cord y = x;
+  //     ...
+  //     // Assign x = y --> x.is_profiled = y.is_profiled | random(...)
+  //     //              ==> x.is_profiled |= random(...)
+  //     //              ==> x converges to 'always profiled'
+  //     x = y;
+  //   }
+  static void MaybeTrackCord(InlineData& cord, const InlineData& src,
+                             MethodIdentifier method);
+
+  // Stops tracking changes for a sampled cord, and deletes this CordzInfo
+  // instance. This function must be called before the sampled cord instance is
+  // deleted, and before the root cordrep of the sampled cord is unreffed.
+  // This function may extend the lifetime of the cordrep in cases where the
+  // CordzInfo instance is being held by a concurrent collection thread.
+  void Untrack();
+
+  // Invokes `Untrack()` on `info` if `info` is not null.
+  static void MaybeUntrackCord(CordzInfo* info);
+
+  CordzInfo() = delete;
+  CordzInfo(const CordzInfo&) = delete;
+  CordzInfo& operator=(const CordzInfo&) = delete;
+
+  // Retrieves the oldest existing CordzInfo.
+  static CordzInfo* Head(const CordzSnapshot& snapshot)
+      ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+  // Retrieves the next oldest existing CordzInfo older than 'this' instance.
+  CordzInfo* Next(const CordzSnapshot& snapshot) const
+      ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+  // Locks this instance for the update identified by `method`.
+  // Increases the count for `method` in `update_tracker`.
+  void Lock(MethodIdentifier method) ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_);
+
+  // Unlocks this instance. If the contained `rep` has been set to null
+  // indicating the Cord has been cleared or is otherwise no longer sampled,
+  // then this method will delete this CordzInfo instance.
+  void Unlock() ABSL_UNLOCK_FUNCTION(mutex_);
+
+  // Asserts that this CordzInfo instance is locked.
+  void AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_);
+
+  // Updates the `rep` property of this instance. This method is invoked by
+  // Cord logic each time the root node of a sampled Cord changes, and before
+  // the old root reference count is deleted. This guarantees that collection
+  // code can always safely take a reference on the tracked cord.
+  // Requires a lock to be held through the `Lock()` method.
+  // TODO(b/117940323): annotate with ABSL_EXCLUSIVE_LOCKS_REQUIRED once all
+  // Cord code is in a state where this can be proven true by the compiler.
+  void SetCordRep(CordRep* rep);
+
+  // Returns the current `rep` property of this instance with a reference
+  // added, or null if this instance represents a cord that has since been
+  // deleted or untracked.
+  CordRep* RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_);
+
+  // Returns the current value of `rep_` for testing purposes only.
+  CordRep* GetCordRepForTesting() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    return rep_;
+  }
+
+  // Sets the current value of `rep_` for testing purposes only.
+  void SetCordRepForTesting(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    rep_ = rep;
+  }
+
+  // Returns the stack trace for where the cord was first sampled. Cords are
+  // potentially sampled when they promote from an inlined cord to a tree or
+  // ring representation, which is not necessarily the location where the cord
+  // was first created. Some cords are created as inlined cords, and only as
+  // data is added do they become a non-inlined cord. However, typically the
+  // location represents reasonably well where the cord is 'created'.
+  absl::Span<void* const> GetStack() const;
+
+  // Returns the stack trace for a sampled cord's 'parent stack trace'. This
+  // value may be set if the cord is sampled (promoted) after being created
+  // from, or being assigned the value of an existing (sampled) cord.
+  absl::Span<void* const> GetParentStack() const;
+
+  // Retrieves the CordzStatistics associated with this Cord. The statistics
+  // are only updated when a Cord goes through a mutation, such as an Append
+  // or RemovePrefix.
+  CordzStatistics GetCordzStatistics() const;
+
+ private:
+  using SpinLock = absl::base_internal::SpinLock;
+  using SpinLockHolder = ::absl::base_internal::SpinLockHolder;
+
+  // Global cordz info list. CordzInfo stores a pointer to the global list
+  // instance to harden against ODR violations.
+  struct List {
+    constexpr explicit List(absl::ConstInitType)
+        : mutex(absl::kConstInit,
+                absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
+
+    SpinLock mutex;
+    std::atomic<CordzInfo*> head ABSL_GUARDED_BY(mutex){nullptr};
+  };
+
+  static constexpr size_t kMaxStackDepth = 64;
+
+  explicit CordzInfo(CordRep* rep, const CordzInfo* src,
+                     MethodIdentifier method);
+  ~CordzInfo() override;
+
+  // Sets `rep_` without holding a lock.
+  void UnsafeSetCordRep(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+  void Track();
+
+  // Returns the parent method from `src`, which is either `parent_method_` or
+  // `method_` depending on whether `parent_method_` is kUnknown.
+  // Returns kUnknown if `src` is null.
+  static MethodIdentifier GetParentMethod(const CordzInfo* src);
+
+  // Fills the provided stack from `src`, copying either `parent_stack_` or
+  // `stack_` depending on whether `parent_stack_` is empty, and returns the
+  // size of the copied stack.
+  // Returns 0 if `src` is null.
+  static size_t FillParentStack(const CordzInfo* src, void** stack);
+
+  void ODRCheck() const {
+#ifndef NDEBUG
+    ABSL_RAW_CHECK(list_ == &global_list_, "ODR violation in Cord");
+#endif
+  }
+
+  // Non-inlined implementation of `MaybeTrackCord`, which is executed if
+  // either `src` is sampled or `cord` is sampled, and either untracks or
+  // tracks `cord` as documented per `MaybeTrackCord`.
+  static void MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
+                                 MethodIdentifier method);
+
+  ABSL_CONST_INIT static List global_list_;
+  List* const list_ = &global_list_;
+
+  // ci_prev_ and ci_next_ require the global list mutex to be held.
+  // Unfortunately we can't use thread annotations such that the thread safety
+  // analysis understands that list_ and global_list_ are one and the same.
+  std::atomic<CordzInfo*> ci_prev_{nullptr};
+  std::atomic<CordzInfo*> ci_next_{nullptr};
+
+  mutable absl::Mutex mutex_;
+  CordRep* rep_ ABSL_GUARDED_BY(mutex_);
+
+  void* stack_[kMaxStackDepth];
+  void* parent_stack_[kMaxStackDepth];
+  const size_t stack_depth_;
+  const size_t parent_stack_depth_;
+  const MethodIdentifier method_;
+  const MethodIdentifier parent_method_;
+  CordzUpdateTracker update_tracker_;
+  const absl::Time create_time_;
+};
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
+    InlineData& cord, MethodIdentifier method) {
+  if (ABSL_PREDICT_FALSE(cordz_should_profile())) {
+    TrackCord(cord, method);
+  }
+}
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
+    InlineData& cord, const InlineData& src, MethodIdentifier method) {
+  if (ABSL_PREDICT_FALSE(InlineData::is_either_profiled(cord, src))) {
+    MaybeTrackCordImpl(cord, src, method);
+  }
+}
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeUntrackCord(
+    CordzInfo* info) {
+  if (ABSL_PREDICT_FALSE(info)) {
+    info->Untrack();
+  }
+}
+
+inline void CordzInfo::AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_) {
+#ifndef NDEBUG
+  mutex_.AssertHeld();
+#endif
+}
+
+inline void CordzInfo::SetCordRep(CordRep* rep) {
+  AssertHeld();
+  rep_ = rep;
+}
+
+inline void CordzInfo::UnsafeSetCordRep(CordRep* rep) { rep_ = rep; }
+
+inline CordRep* CordzInfo::RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_) {
+  MutexLock lock(&mutex_);
+  return rep_ ? CordRep::Ref(rep_) : nullptr;
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_
diff --git a/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc b/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc
new file mode 100644
index 0000000..53d2f2e
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc
@@ -0,0 +1,557 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include <random>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/crc/internal/crc_cord_state.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/strings/internal/cordz_sample_token.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_scope.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/notification.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Do not print statistics contents; the matcher prints them as needed.
+inline void PrintTo(const CordzStatistics& stats, std::ostream* s) {
+  if (s) *s << "CordzStatistics{...}";
+}
+
+namespace {
+
+using ::testing::Ge;
+
+// Creates a flat of the specified allocated size
+CordRepFlat* Flat(size_t size) {
+  // Round up to a tag size, as we are going to poke an exact tag size back into
+  // the allocated flat. 'size returning allocators' could grant us more than we
+  // wanted, but we are ok to poke the 'requested' size in the tag, even in the
+  // presence of sized deletes, so we need to make sure the size rounds
+  // perfectly to a tag value.
+  assert(size >= kMinFlatSize);
+  size = RoundUpForTag(size);
+  CordRepFlat* flat = CordRepFlat::New(size - kFlatOverhead);
+  flat->tag = AllocatedSizeToTag(size);
+  flat->length = size - kFlatOverhead;
+  return flat;
+}
+
+// Creates an external of the specified length
+CordRepExternal* External(size_t length = 512) {
+  return static_cast<CordRepExternal*>(
+      NewExternalRep(absl::string_view("", length), [](absl::string_view) {}));
+}
+
+// Creates a substring on the provided rep of length - 1
+CordRepSubstring* Substring(CordRep* rep) {
+  auto* substring = new CordRepSubstring;
+  substring->length = rep->length - 1;
+  substring->tag = SUBSTRING;
+  substring->child = rep;
+  return substring;
+}
+
+// Reference count helper
+struct RefHelper {
+  std::vector<CordRep*> refs;
+
+  ~RefHelper() {
+    for (CordRep* rep : refs) {
+      CordRep::Unref(rep);
+    }
+  }
+
+  // Invokes CordRep::Unref() on `rep` when this instance is destroyed.
+  template <typename T>
+  T* NeedsUnref(T* rep) {
+    refs.push_back(rep);
+    return rep;
+  }
+
+  // Adds `n` reference counts to `rep` which will be unreffed when this
+  // instance is destroyed.
+  template <typename T>
+  T* Ref(T* rep, size_t n = 1) {
+    while (n--) {
+      NeedsUnref(CordRep::Ref(rep));
+    }
+    return rep;
+  }
+};
+
+// Sizeof helper. Returns the allocated size of `rep`, excluding any child
+// elements for substring, concat and ring cord reps.
+template <typename T>
+size_t SizeOf(const T* rep) {
+  return sizeof(T);
+}
+
+template <>
+size_t SizeOf(const CordRepFlat* rep) {
+  return rep->AllocatedSize();
+}
+
+template <>
+size_t SizeOf(const CordRepExternal* rep) {
+  // See cord.cc
+  return sizeof(CordRepExternalImpl<intptr_t>) + rep->length;
+}
+
+template <>
+size_t SizeOf(const CordRepRing* rep) {
+  return CordRepRing::AllocSize(rep->capacity());
+}
+
+// Computes fair share memory used in a naive 'we dare to recurse' way.
+double FairShareImpl(CordRep* rep, size_t ref) {
+  double self = 0.0, children = 0.0;
+  ref *= rep->refcount.Get();
+  if (rep->tag >= FLAT) {
+    self = SizeOf(rep->flat());
+  } else if (rep->tag == EXTERNAL) {
+    self = SizeOf(rep->external());
+  } else if (rep->tag == SUBSTRING) {
+    self = SizeOf(rep->substring());
+    children = FairShareImpl(rep->substring()->child, ref);
+  } else if (rep->tag == BTREE) {
+    self = SizeOf(rep->btree());
+    for (CordRep* edge : rep->btree()->Edges()) {
+      children += FairShareImpl(edge, ref);
+    }
+  } else if (rep->tag == RING) {
+    self = SizeOf(rep->ring());
+    rep->ring()->ForEach([&](CordRepRing::index_type i) {
+      self += FairShareImpl(rep->ring()->entry_child(i), 1);
+    });
+  } else {
+    assert(false);
+  }
+  return self / ref + children;
+}
+
+// Returns the fair share memory size from `FairShareImpl()` as a size_t.
+size_t FairShare(CordRep* rep, size_t ref = 1) {
+  return static_cast<size_t>(FairShareImpl(rep, ref));
+}
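+
+// For instance, for a flat with a refcount of 2, FairShare(flat) evaluates to
+// SizeOf(flat) / 2 (illustrative; see the SharedFlat expectation below).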
+
+// Samples the cord and returns CordzInfo::GetCordzStatistics().
+CordzStatistics SampleCord(CordRep* rep) {
+  InlineData cord(rep);
+  CordzInfo::TrackCord(cord, CordzUpdateTracker::kUnknown);
+  CordzStatistics stats = cord.cordz_info()->GetCordzStatistics();
+  cord.cordz_info()->Untrack();
+  return stats;
+}
+
+MATCHER_P(EqStatistics, stats, "Statistics equal expected values") {
+  bool ok = true;
+
+#define STATS_MATCHER_EXPECT_EQ(member)                              \
+  if (stats.member != arg.member) {                                  \
+    *result_listener << "\n    stats." << #member                    \
+                     << ": actual = " << arg.member << ", expected " \
+                     << stats.member;                                \
+    ok = false;                                                      \
+  }
+
+  STATS_MATCHER_EXPECT_EQ(size);
+  STATS_MATCHER_EXPECT_EQ(node_count);
+  STATS_MATCHER_EXPECT_EQ(node_counts.flat);
+  STATS_MATCHER_EXPECT_EQ(node_counts.flat_64);
+  STATS_MATCHER_EXPECT_EQ(node_counts.flat_128);
+  STATS_MATCHER_EXPECT_EQ(node_counts.flat_256);
+  STATS_MATCHER_EXPECT_EQ(node_counts.flat_512);
+  STATS_MATCHER_EXPECT_EQ(node_counts.flat_1k);
+  STATS_MATCHER_EXPECT_EQ(node_counts.external);
+  STATS_MATCHER_EXPECT_EQ(node_counts.concat);
+  STATS_MATCHER_EXPECT_EQ(node_counts.substring);
+  STATS_MATCHER_EXPECT_EQ(node_counts.ring);
+  STATS_MATCHER_EXPECT_EQ(node_counts.btree);
+  STATS_MATCHER_EXPECT_EQ(estimated_memory_usage);
+  STATS_MATCHER_EXPECT_EQ(estimated_fair_share_memory_usage);
+
+#undef STATS_MATCHER_EXPECT_EQ
+
+  return ok;
+}
+
+TEST(CordzInfoStatisticsTest, Flat) {
+  RefHelper ref;
+  auto* flat = ref.NeedsUnref(Flat(512));
+
+  CordzStatistics expected;
+  expected.size = flat->length;
+  expected.estimated_memory_usage = SizeOf(flat);
+  expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
+  expected.node_count = 1;
+  expected.node_counts.flat = 1;
+  expected.node_counts.flat_512 = 1;
+
+  EXPECT_THAT(SampleCord(flat), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, SharedFlat) {
+  RefHelper ref;
+  auto* flat = ref.Ref(ref.NeedsUnref(Flat(64)));
+
+  CordzStatistics expected;
+  expected.size = flat->length;
+  expected.estimated_memory_usage = SizeOf(flat);
+  expected.estimated_fair_share_memory_usage = SizeOf(flat) / 2;
+  expected.node_count = 1;
+  expected.node_counts.flat = 1;
+  expected.node_counts.flat_64 = 1;
+
+  EXPECT_THAT(SampleCord(flat), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, External) {
+  RefHelper ref;
+  auto* external = ref.NeedsUnref(External());
+
+  CordzStatistics expected;
+  expected.size = external->length;
+  expected.estimated_memory_usage = SizeOf(external);
+  expected.estimated_fair_share_memory_usage = SizeOf(external);
+  expected.node_count = 1;
+  expected.node_counts.external = 1;
+
+  EXPECT_THAT(SampleCord(external), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, SharedExternal) {
+  RefHelper ref;
+  auto* external = ref.Ref(ref.NeedsUnref(External()));
+
+  CordzStatistics expected;
+  expected.size = external->length;
+  expected.estimated_memory_usage = SizeOf(external);
+  expected.estimated_fair_share_memory_usage = SizeOf(external) / 2;
+  expected.node_count = 1;
+  expected.node_counts.external = 1;
+
+  EXPECT_THAT(SampleCord(external), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, Substring) {
+  RefHelper ref;
+  auto* flat = Flat(1024);
+  auto* substring = ref.NeedsUnref(Substring(flat));
+
+  CordzStatistics expected;
+  expected.size = substring->length;
+  expected.estimated_memory_usage = SizeOf(substring) + SizeOf(flat);
+  expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
+  expected.node_count = 2;
+  expected.node_counts.flat = 1;
+  expected.node_counts.flat_1k = 1;
+  expected.node_counts.substring = 1;
+
+  EXPECT_THAT(SampleCord(substring), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, SharedSubstring) {
+  RefHelper ref;
+  auto* flat = ref.Ref(Flat(511), 2);
+  auto* substring = ref.Ref(ref.NeedsUnref(Substring(flat)));
+
+  CordzStatistics expected;
+  expected.size = substring->length;
+  expected.estimated_memory_usage = SizeOf(flat) + SizeOf(substring);
+  expected.estimated_fair_share_memory_usage =
+      SizeOf(substring) / 2 + SizeOf(flat) / 6;
+  expected.node_count = 2;
+  expected.node_counts.flat = 1;
+  expected.node_counts.flat_512 = 1;
+  expected.node_counts.substring = 1;
+
+  EXPECT_THAT(SampleCord(substring), EqStatistics(expected));
+}
+
+
+TEST(CordzInfoStatisticsTest, Ring) {
+  RefHelper ref;
+  auto* flat1 = Flat(240);
+  auto* flat2 = Flat(2000);
+  auto* flat3 = Flat(70);
+  auto* external = External(3000);
+  CordRepRing* ring = CordRepRing::Create(flat1);
+  ring = CordRepRing::Append(ring, flat2);
+  ring = CordRepRing::Append(ring, flat3);
+  ring = ref.NeedsUnref(CordRepRing::Append(ring, external));
+
+  CordzStatistics expected;
+  expected.size = ring->length;
+  expected.estimated_memory_usage = SizeOf(ring) + SizeOf(flat1) +
+                                    SizeOf(flat2) + SizeOf(flat3) +
+                                    SizeOf(external);
+  expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
+  expected.node_count = 5;
+  expected.node_counts.flat = 3;
+  expected.node_counts.flat_128 = 1;
+  expected.node_counts.flat_256 = 1;
+  expected.node_counts.external = 1;
+  expected.node_counts.ring = 1;
+
+  EXPECT_THAT(SampleCord(ring), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, SharedSubstringRing) {
+  RefHelper ref;
+  auto* flat1 = ref.Ref(Flat(240));
+  auto* flat2 = Flat(200);
+  auto* flat3 = Flat(70);
+  auto* external = ref.Ref(External(3000), 5);
+  CordRepRing* ring = CordRepRing::Create(flat1);
+  ring = CordRepRing::Append(ring, flat2);
+  ring = CordRepRing::Append(ring, flat3);
+  ring = ref.Ref(CordRepRing::Append(ring, external), 4);
+  auto* substring = ref.Ref(ref.NeedsUnref(Substring(ring)));
+
+
+  CordzStatistics expected;
+  expected.size = substring->length;
+  expected.estimated_memory_usage = SizeOf(ring) + SizeOf(flat1) +
+                                    SizeOf(flat2) + SizeOf(flat3) +
+                                    SizeOf(external) + SizeOf(substring);
+  expected.estimated_fair_share_memory_usage = FairShare(substring);
+  expected.node_count = 6;
+  expected.node_counts.flat = 3;
+  expected.node_counts.flat_128 = 1;
+  expected.node_counts.flat_256 = 2;
+  expected.node_counts.external = 1;
+  expected.node_counts.ring = 1;
+  expected.node_counts.substring = 1;
+
+  EXPECT_THAT(SampleCord(substring), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, BtreeLeaf) {
+  ASSERT_THAT(CordRepBtree::kMaxCapacity, Ge(3u));
+  RefHelper ref;
+  auto* flat1 = Flat(2000);
+  auto* flat2 = Flat(200);
+  auto* substr = Substring(flat2);
+  auto* external = External(3000);
+
+  CordRepBtree* tree = CordRepBtree::Create(flat1);
+  tree = CordRepBtree::Append(tree, substr);
+  tree = CordRepBtree::Append(tree, external);
+  size_t flat3_count = CordRepBtree::kMaxCapacity - 3;
+  size_t flat3_size = 0;
+  for (size_t i = 0; i < flat3_count; ++i) {
+    auto* flat3 = Flat(70);
+    flat3_size += SizeOf(flat3);
+    tree = CordRepBtree::Append(tree, flat3);
+  }
+  ref.NeedsUnref(tree);
+
+  CordzStatistics expected;
+  expected.size = tree->length;
+  expected.estimated_memory_usage = SizeOf(tree) + SizeOf(flat1) +
+                                    SizeOf(flat2) + SizeOf(substr) +
+                                    flat3_size + SizeOf(external);
+  expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
+  expected.node_count = 1 + 3 + 1 + flat3_count;
+  expected.node_counts.flat = 2 + flat3_count;
+  expected.node_counts.flat_128 = flat3_count;
+  expected.node_counts.flat_256 = 1;
+  expected.node_counts.external = 1;
+  expected.node_counts.substring = 1;
+  expected.node_counts.btree = 1;
+
+  EXPECT_THAT(SampleCord(tree), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, BtreeNodeShared) {
+  RefHelper ref;
+  static constexpr int leaf_count = 3;
+  const size_t flat3_count = CordRepBtree::kMaxCapacity - 3;
+  ASSERT_THAT(flat3_count, Ge(0u));
+
+  CordRepBtree* tree = nullptr;
+  size_t mem_size = 0;
+  for (int i = 0; i < leaf_count; ++i) {
+    auto* flat1 = ref.Ref(Flat(2000), 9);
+    mem_size += SizeOf(flat1);
+    if (i == 0) {
+      tree = CordRepBtree::Create(flat1);
+    } else {
+      tree = CordRepBtree::Append(tree, flat1);
+    }
+
+    auto* flat2 = Flat(200);
+    auto* substr = Substring(flat2);
+    mem_size += SizeOf(flat2) + SizeOf(substr);
+    tree = CordRepBtree::Append(tree, substr);
+
+    auto* external = External(30);
+    mem_size += SizeOf(external);
+    tree = CordRepBtree::Append(tree, external);
+
+    for (size_t i = 0; i < flat3_count; ++i) {
+      auto* flat3 = Flat(70);
+      mem_size += SizeOf(flat3);
+      tree = CordRepBtree::Append(tree, flat3);
+    }
+
+    if (i == 0) {
+      mem_size += SizeOf(tree);
+    } else {
+      mem_size += SizeOf(tree->Edges().back()->btree());
+    }
+  }
+  ref.NeedsUnref(tree);
+
+  // Ref count: 2 for top (add 1), 5 for leaf 0 (add 4).
+  ref.Ref(tree, 1);
+  ref.Ref(tree->Edges().front(), 4);
+
+  CordzStatistics expected;
+  expected.size = tree->length;
+  expected.estimated_memory_usage = SizeOf(tree) + mem_size;
+  expected.estimated_fair_share_memory_usage = FairShare(tree);
+
+  expected.node_count = 1 + leaf_count * (1 + 3 + 1 + flat3_count);
+  expected.node_counts.flat = leaf_count * (2 + flat3_count);
+  expected.node_counts.flat_128 = leaf_count * flat3_count;
+  expected.node_counts.flat_256 = leaf_count;
+  expected.node_counts.external = leaf_count;
+  expected.node_counts.substring = leaf_count;
+  expected.node_counts.btree = 1 + leaf_count;
+
+  EXPECT_THAT(SampleCord(tree), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, Crc) {
+  RefHelper ref;
+  auto* left = Flat(1000);
+  auto* crc =
+      ref.NeedsUnref(CordRepCrc::New(left, crc_internal::CrcCordState()));
+
+  CordzStatistics expected;
+  expected.size = left->length;
+  expected.estimated_memory_usage = SizeOf(crc) + SizeOf(left);
+  expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
+  expected.node_count = 2;
+  expected.node_counts.flat = 1;
+  expected.node_counts.flat_1k = 1;
+  expected.node_counts.crc = 1;
+
+  EXPECT_THAT(SampleCord(crc), EqStatistics(expected));
+}
+
+TEST(CordzInfoStatisticsTest, ThreadSafety) {
+  Notification stop;
+  static constexpr int kNumThreads = 8;
+  int64_t sampled_node_count = 0;
+
+  {
+    absl::synchronization_internal::ThreadPool pool(kNumThreads);
+
+    // Run analyzer thread emulating a CordzHandler collection.
+    pool.Schedule([&]() {
+      while (!stop.HasBeenNotified()) {
+        // Run every 10us (about 100K total collections).
+        absl::SleepFor(absl::Microseconds(10));
+        CordzSampleToken token;
+        for (const CordzInfo& cord_info : token) {
+          CordzStatistics stats = cord_info.GetCordzStatistics();
+          sampled_node_count += stats.node_count;
+        }
+      }
+    });
+
+    // Run 'application threads'
+    for (int i = 0; i < kNumThreads; ++i) {
+      pool.Schedule([&]() {
+        // Track 0 - 2 cordz infos at a time, providing permutations of 0, 1
+        // and 2 CordzHandle and CordzInfo queues being active, with plenty of
+        // 'empty to non empty' transitions.
+        InlineData cords[2];
+        std::minstd_rand gen;
+        std::uniform_int_distribution<int> coin_toss(0, 1);
+
+        while (!stop.HasBeenNotified()) {
+          for (InlineData& cord : cords) {
+            // 50/50 flip the state of the cord
+            if (coin_toss(gen) != 0) {
+              if (cord.is_tree()) {
+                // 50/50 simulate delete (untrack) or 'edit to empty'
+                if (coin_toss(gen) != 0) {
+                  CordzInfo::MaybeUntrackCord(cord.cordz_info());
+                } else {
+                  CordzUpdateScope scope(cord.cordz_info(),
+                                         CordzUpdateTracker::kUnknown);
+                  scope.SetCordRep(nullptr);
+                }
+                CordRep::Unref(cord.as_tree());
+                cord.set_inline_size(0);
+              } else {
+                // Coin toss to 25% ring, 25% btree, and 50% flat.
+                CordRep* rep = Flat(256);
+                if (coin_toss(gen) != 0) {
+                  if (coin_toss(gen) != 0) {
+                    rep = CordRepRing::Create(rep);
+                  } else {
+                    rep = CordRepBtree::Create(rep);
+                  }
+                }
+                cord.make_tree(rep);
+
+                // 50/50 sample
+                if (coin_toss(gen) != 0) {
+                  CordzInfo::TrackCord(cord, CordzUpdateTracker::kUnknown);
+                }
+              }
+            }
+          }
+        }
+        for (InlineData& cord : cords) {
+          if (cord.is_tree()) {
+            CordzInfo::MaybeUntrackCord(cord.cordz_info());
+            CordRep::Unref(cord.as_tree());
+          }
+        }
+      });
+    }
+
+    // Run for 1 second to give memory and thread safety analyzers plenty of
+    // time to detect any mishaps or undefined behaviors.
+    absl::SleepFor(absl::Seconds(1));
+    stop.Notify();
+  }
+
+  std::cout << "Sampled " << sampled_node_count << " nodes\n";
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_info_test.cc b/abseil-cpp/absl/strings/internal/cordz_info_test.cc
new file mode 100644
index 0000000..cd226c3
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_info_test.cc
@@ -0,0 +1,342 @@
+// Copyright 2019 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_info.h"
+
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/symbolize.h"
+#include "absl/strings/cordz_test_helpers.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cordz_handle.h"
+#include "absl/strings/internal/cordz_statistics.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::HasSubstr;
+using ::testing::Ne;
+using ::testing::SizeIs;
+
+// Used test values
+auto constexpr kUnknownMethod = CordzUpdateTracker::kUnknown;
+auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;
+auto constexpr kChildMethod = CordzUpdateTracker::kConstructorCord;
+auto constexpr kUpdateMethod = CordzUpdateTracker::kAppendString;
+
+// Local less verbose helper
+std::vector<const CordzHandle*> DeleteQueue() {
+  return CordzHandle::DiagnosticsGetDeleteQueue();
+}
+
+std::string FormatStack(absl::Span<void* const> raw_stack) {
+  static constexpr size_t buf_size = 1 << 14;
+  std::unique_ptr<char[]> buf(new char[buf_size]);
+  std::string output;
+  for (void* stackp : raw_stack) {
+    if (absl::Symbolize(stackp, buf.get(), buf_size)) {
+      absl::StrAppend(&output, "    ", buf.get(), "\n");
+    }
+  }
+  return output;
+}
+
+TEST(CordzInfoTest, TrackCord) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+  ASSERT_THAT(info, Ne(nullptr));
+  EXPECT_FALSE(info->is_snapshot());
+  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
+  EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
+  info->Untrack();
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithoutSampling) {
+  CordzSamplingIntervalHelper sample_none(99999);
+  TestCordData parent, child;
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithSampling) {
+  CordzSamplingIntervalHelper sample_all(1);
+  TestCordData parent, child;
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingParentSampled) {
+  CordzSamplingIntervalHelper sample_none(99999);
+  TestCordData parent, child;
+  CordzInfo::TrackCord(parent.data, kTrackCordMethod);
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  CordzInfo* parent_info = parent.data.cordz_info();
+  CordzInfo* child_info = child.data.cordz_info();
+  ASSERT_THAT(child_info, Ne(nullptr));
+  EXPECT_THAT(child_info->GetCordRepForTesting(), Eq(child.rep.rep));
+  EXPECT_THAT(child_info->GetParentStack(), parent_info->GetStack());
+  parent_info->Untrack();
+  child_info->Untrack();
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingChildSampled) {
+  CordzSamplingIntervalHelper sample_none(99999);
+  TestCordData parent, child;
+  CordzInfo::TrackCord(child.data, kTrackCordMethod);
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithSamplingChildSampled) {
+  CordzSamplingIntervalHelper sample_all(1);
+  TestCordData parent, child;
+  CordzInfo::TrackCord(child.data, kTrackCordMethod);
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, UntrackCord) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+
+  info->Untrack();
+  EXPECT_THAT(DeleteQueue(), SizeIs(0u));
+}
+
+TEST(CordzInfoTest, UntrackCordWithSnapshot) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+
+  CordzSnapshot snapshot;
+  info->Untrack();
+  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));
+  EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
+  EXPECT_THAT(DeleteQueue(), ElementsAre(info, &snapshot));
+}
+
+TEST(CordzInfoTest, SetCordRep) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+
+  TestCordRep rep;
+  info->Lock(CordzUpdateTracker::kAppendCord);
+  info->SetCordRep(rep.rep);
+  info->Unlock();
+  EXPECT_THAT(info->GetCordRepForTesting(), Eq(rep.rep));
+
+  info->Untrack();
+}
+
+TEST(CordzInfoTest, SetCordRepNullUntracksCordOnUnlock) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+
+  info->Lock(CordzUpdateTracker::kAppendString);
+  info->SetCordRep(nullptr);
+  EXPECT_THAT(info->GetCordRepForTesting(), Eq(nullptr));
+  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
+
+  info->Unlock();
+  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, RefCordRep) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+
+  size_t refcount = data.rep.rep->refcount.Get();
+  EXPECT_THAT(info->RefCordRep(), Eq(data.rep.rep));
+  EXPECT_THAT(data.rep.rep->refcount.Get(), Eq(refcount + 1));
+  CordRep::Unref(data.rep.rep);
+  info->Untrack();
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+TEST(CordzInfoTest, SetCordRepRequiresMutex) {
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+  TestCordRep rep;
+  EXPECT_DEBUG_DEATH(info->SetCordRep(rep.rep), ".*");
+  info->Untrack();
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+TEST(CordzInfoTest, TrackUntrackHeadFirstV2) {
+  CordzSnapshot snapshot;
+  EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
+
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info1 = data.data.cordz_info();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
+  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
+
+  TestCordData data2;
+  CordzInfo::TrackCord(data2.data, kTrackCordMethod);
+  CordzInfo* info2 = data2.data.cordz_info();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
+  EXPECT_THAT(info2->Next(snapshot), Eq(info1));
+  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
+
+  info2->Untrack();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
+  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
+
+  info1->Untrack();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, TrackUntrackTailFirstV2) {
+  CordzSnapshot snapshot;
+  EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
+
+  TestCordData data;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info1 = data.data.cordz_info();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
+  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
+
+  TestCordData data2;
+  CordzInfo::TrackCord(data2.data, kTrackCordMethod);
+  CordzInfo* info2 = data2.data.cordz_info();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
+  EXPECT_THAT(info2->Next(snapshot), Eq(info1));
+  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
+
+  info1->Untrack();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
+  EXPECT_THAT(info2->Next(snapshot), Eq(nullptr));
+
+  info2->Untrack();
+  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, StackV2) {
+  TestCordData data;
+  // kMaxStackDepth is intentionally less than 64 (which is the max depth that
+  // Cordz will record) because if the actual stack depth is over 64
+  // (which it is on Apple platforms) then the expected_stack will end up
+  // catching a few frames at the end that the actual_stack didn't get and
+  // it will no longer be subset. At the time of this writing 58 is the max
+  // that will allow this test to pass (with a minimum os version of iOS 9), so
+  // rounded down to 50 to hopefully not run into this in the future if Apple
+  // makes small modifications to its testing stack. 50 is sufficient to prove
+  // that we got a decent stack.
+  static constexpr int kMaxStackDepth = 50;
+  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo* info = data.data.cordz_info();
+  std::vector<void*> local_stack;
+  local_stack.resize(kMaxStackDepth);
+  // In some environments we don't get stack traces. For example, on Android
+  // absl::GetStackTrace will return 0, indicating it didn't find any stack. The
+  // resultant formatted stack will be "", but that still equals the stack
+  // recorded in CordzInfo, which is also empty. The skip_count is 1 so that the
+  // line number of the current stack isn't included in the HasSubstr check.
+  local_stack.resize(static_cast<size_t>(
+      absl::GetStackTrace(local_stack.data(), kMaxStackDepth,
+                          /*skip_count=*/1)));
+
+  std::string got_stack = FormatStack(info->GetStack());
+  std::string expected_stack = FormatStack(local_stack);
+  // If TrackCord is inlined, got_stack should match expected_stack. If it isn't
+  // inlined, got_stack should include an additional frame not present in
+  // expected_stack. Either way, expected_stack should be a substring of
+  // got_stack.
+  EXPECT_THAT(got_stack, HasSubstr(expected_stack));
+
+  info->Untrack();
+}
+
+// Local helper functions to get different stacks for child and parent.
+CordzInfo* TrackChildCord(InlineData& data, const InlineData& parent) {
+  CordzInfo::TrackCord(data, parent, kChildMethod);
+  return data.cordz_info();
+}
+CordzInfo* TrackParentCord(InlineData& data) {
+  CordzInfo::TrackCord(data, kTrackCordMethod);
+  return data.cordz_info();
+}
+
+TEST(CordzInfoTest, GetStatistics) {
+  TestCordData data;
+  CordzInfo* info = TrackParentCord(data.data);
+
+  CordzStatistics statistics = info->GetCordzStatistics();
+  EXPECT_THAT(statistics.size, Eq(data.rep.rep->length));
+  EXPECT_THAT(statistics.method, Eq(kTrackCordMethod));
+  EXPECT_THAT(statistics.parent_method, Eq(kUnknownMethod));
+  EXPECT_THAT(statistics.update_tracker.Value(kTrackCordMethod), Eq(1));
+
+  info->Untrack();
+}
+
+TEST(CordzInfoTest, LockCountsMethod) {
+  TestCordData data;
+  CordzInfo* info = TrackParentCord(data.data);
+
+  info->Lock(kUpdateMethod);
+  info->Unlock();
+  info->Lock(kUpdateMethod);
+  info->Unlock();
+
+  CordzStatistics statistics = info->GetCordzStatistics();
+  EXPECT_THAT(statistics.update_tracker.Value(kUpdateMethod), Eq(2));
+
+  info->Untrack();
+}
+
+TEST(CordzInfoTest, FromParent) {
+  TestCordData parent;
+  TestCordData child;
+  CordzInfo* info_parent = TrackParentCord(parent.data);
+  CordzInfo* info_child = TrackChildCord(child.data, parent.data);
+
+  std::string stack = FormatStack(info_parent->GetStack());
+  std::string parent_stack = FormatStack(info_child->GetParentStack());
+  EXPECT_THAT(stack, Eq(parent_stack));
+
+  CordzStatistics statistics = info_child->GetCordzStatistics();
+  EXPECT_THAT(statistics.size, Eq(child.rep.rep->length));
+  EXPECT_THAT(statistics.method, Eq(kChildMethod));
+  EXPECT_THAT(statistics.parent_method, Eq(kTrackCordMethod));
+  EXPECT_THAT(statistics.update_tracker.Value(kChildMethod), Eq(1));
+
+  info_parent->Untrack();
+  info_child->Untrack();
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
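
The skip_count convention used by the test above can be shown in isolation. This is a minimal sketch, not part of the change; the helper name CaptureHere is hypothetical, while absl::GetStackTrace is the real API the test relies on.

#include <vector>

#include "absl/debugging/stacktrace.h"

// Hypothetical helper: captures up to `max_depth` frames of the caller's
// stack. skip_count=1 drops this helper's own frame, mirroring how the test
// above drops the frame for the line that calls absl::GetStackTrace.
std::vector<void*> CaptureHere(int max_depth = 50) {
  std::vector<void*> frames(static_cast<size_t>(max_depth));
  int depth = absl::GetStackTrace(frames.data(), max_depth, /*skip_count=*/1);
  // On platforms without stack unwinding support (e.g. some Android builds),
  // depth is 0 and the result is simply empty.
  frames.resize(static_cast<size_t>(depth));
  return frames;
}
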
diff --git a/abseil-cpp/absl/strings/internal/cordz_sample_token.cc b/abseil-cpp/absl/strings/internal/cordz_sample_token.cc
new file mode 100644
index 0000000..ba1270d
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_sample_token.cc
@@ -0,0 +1,64 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_sample_token.h"
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cordz_handle.h"
+#include "absl/strings/internal/cordz_info.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+CordzSampleToken::Iterator& CordzSampleToken::Iterator::operator++() {
+  if (current_) {
+    current_ = current_->Next(*token_);
+  }
+  return *this;
+}
+
+CordzSampleToken::Iterator CordzSampleToken::Iterator::operator++(int) {
+  Iterator it(*this);
+  operator++();
+  return it;
+}
+
+bool operator==(const CordzSampleToken::Iterator& lhs,
+                const CordzSampleToken::Iterator& rhs) {
+  return lhs.current_ == rhs.current_ &&
+         (lhs.current_ == nullptr || lhs.token_ == rhs.token_);
+}
+
+bool operator!=(const CordzSampleToken::Iterator& lhs,
+                const CordzSampleToken::Iterator& rhs) {
+  return !(lhs == rhs);
+}
+
+CordzSampleToken::Iterator::reference CordzSampleToken::Iterator::operator*()
+    const {
+  return *current_;
+}
+
+CordzSampleToken::Iterator::pointer CordzSampleToken::Iterator::operator->()
+    const {
+  return current_;
+}
+
+CordzSampleToken::Iterator::Iterator(const CordzSampleToken* token)
+    : token_(token), current_(CordzInfo::Head(*token)) {}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_sample_token.h b/abseil-cpp/absl/strings/internal/cordz_sample_token.h
new file mode 100644
index 0000000..2a86bc3
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_sample_token.h
@@ -0,0 +1,97 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cordz_handle.h"
+#include "absl/strings/internal/cordz_info.h"
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// The existence of a CordzSampleToken guarantees that a reader can traverse the
+// global_cordz_infos_head linked-list without needing to hold a mutex. When a
+// CordzSampleToken exists, all CordzInfo objects that would be destroyed are
+// instead appended to a deletion queue. When the CordzSampleToken is destroyed,
+// it will also clean up any of these CordzInfo objects.
+//
+// E.g., ST are CordzSampleToken objects and CH are CordzHandle objects.
+//   ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail
+//
+// This list tracks that CH1 and CH2 were created after ST1, so the thread
+// holding ST1 might have a reference to CH1, CH2, ST2, and CH3. However, ST2
+// was created later, so the thread holding the ST2 token cannot have a
+// reference to ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will
+// delete ST1, CH1, and CH2. If instead ST2 is cleaned up first, that thread
+// will only delete ST2.
+//
+// If ST1 is cleaned up first, the new list will be:
+//   ST2 <- CH3 <- global_delete_queue_tail
+//
+// If ST2 is cleaned up first, the new list will be:
+//   ST1 <- CH1 <- CH2 <- CH3 <- global_delete_queue_tail
+//
+// All new CordzHandle objects are appended to the list, so if a new thread
+// comes along before either ST1 or ST2 are cleaned up, the new list will be:
+//   ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- ST3 <- global_delete_queue_tail
+//
+// A thread must hold the global_delete_queue_mu mutex whenever it's altering
+// this list.
+//
+// It is safe for a thread that holds a CordzSampleToken to read
+// global_cordz_infos at any time since the objects it is able to retrieve will
+// not be deleted while the CordzSampleToken exists.
+class CordzSampleToken : public CordzSnapshot {
+ public:
+  class Iterator {
+   public:
+    using iterator_category = std::input_iterator_tag;
+    using value_type = const CordzInfo&;
+    using difference_type = ptrdiff_t;
+    using pointer = const CordzInfo*;
+    using reference = value_type;
+
+    Iterator() = default;
+
+    Iterator& operator++();
+    Iterator operator++(int);
+    friend bool operator==(const Iterator& lhs, const Iterator& rhs);
+    friend bool operator!=(const Iterator& lhs, const Iterator& rhs);
+    reference operator*() const;
+    pointer operator->() const;
+
+   private:
+    friend class CordzSampleToken;
+    explicit Iterator(const CordzSampleToken* token);
+
+    const CordzSampleToken* token_ = nullptr;
+    pointer current_ = nullptr;
+  };
+
+  CordzSampleToken() = default;
+  CordzSampleToken(const CordzSampleToken&) = delete;
+  CordzSampleToken& operator=(const CordzSampleToken&) = delete;
+
+  Iterator begin() { return Iterator(this); }
+  Iterator end() { return Iterator(); }
+};
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_
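
The intended read path is a plain range-for over a token, exactly as the tests later in this change do. The sketch below is illustrative; the function name VisitSampledCords is not part of Abseil.

#include "absl/strings/internal/cordz_sample_token.h"

// While `token` is alive, any CordzInfo it can reach is queued for deletion
// rather than destroyed, so this loop needs no mutex.
void VisitSampledCords() {
  absl::cord_internal::CordzSampleToken token;
  for (const absl::cord_internal::CordzInfo& info : token) {
    // A collector would typically read info.GetCordzStatistics() here.
    (void)info;
  }
}
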
diff --git a/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc b/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc
new file mode 100644
index 0000000..6be1770
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc
@@ -0,0 +1,208 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_sample_token.h"
+
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+#include "absl/random/random.h"
+#include "absl/strings/cordz_test_helpers.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cordz_handle.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Ne;
+
+// Used test values
+auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;
+
+TEST(CordzSampleTokenTest, IteratorTraits) {
+  static_assert(std::is_copy_constructible<CordzSampleToken::Iterator>::value,
+                "");
+  static_assert(std::is_copy_assignable<CordzSampleToken::Iterator>::value, "");
+  static_assert(std::is_move_constructible<CordzSampleToken::Iterator>::value,
+                "");
+  static_assert(std::is_move_assignable<CordzSampleToken::Iterator>::value, "");
+  static_assert(
+      std::is_same<
+          std::iterator_traits<CordzSampleToken::Iterator>::iterator_category,
+          std::input_iterator_tag>::value,
+      "");
+  static_assert(
+      std::is_same<std::iterator_traits<CordzSampleToken::Iterator>::value_type,
+                   const CordzInfo&>::value,
+      "");
+  static_assert(
+      std::is_same<
+          std::iterator_traits<CordzSampleToken::Iterator>::difference_type,
+          ptrdiff_t>::value,
+      "");
+  static_assert(
+      std::is_same<std::iterator_traits<CordzSampleToken::Iterator>::pointer,
+                   const CordzInfo*>::value,
+      "");
+  static_assert(
+      std::is_same<std::iterator_traits<CordzSampleToken::Iterator>::reference,
+                   const CordzInfo&>::value,
+      "");
+}
+
+TEST(CordzSampleTokenTest, IteratorEmpty) {
+  CordzSampleToken token;
+  EXPECT_THAT(token.begin(), Eq(token.end()));
+}
+
+TEST(CordzSampleTokenTest, Iterator) {
+  TestCordData cord1, cord2, cord3;
+  CordzInfo::TrackCord(cord1.data, kTrackCordMethod);
+  CordzInfo* info1 = cord1.data.cordz_info();
+  CordzInfo::TrackCord(cord2.data, kTrackCordMethod);
+  CordzInfo* info2 = cord2.data.cordz_info();
+  CordzInfo::TrackCord(cord3.data, kTrackCordMethod);
+  CordzInfo* info3 = cord3.data.cordz_info();
+
+  CordzSampleToken token;
+  std::vector<const CordzInfo*> found;
+  for (const CordzInfo& cord_info : token) {
+    found.push_back(&cord_info);
+  }
+
+  EXPECT_THAT(found, ElementsAre(info3, info2, info1));
+
+  info1->Untrack();
+  info2->Untrack();
+  info3->Untrack();
+}
+
+TEST(CordzSampleTokenTest, IteratorEquality) {
+  TestCordData cord1;
+  TestCordData cord2;
+  TestCordData cord3;
+  CordzInfo::TrackCord(cord1.data, kTrackCordMethod);
+  CordzInfo* info1 = cord1.data.cordz_info();
+
+  CordzSampleToken token1;
+  // lhs starts with the CordzInfo corresponding to cord1 at the head.
+  CordzSampleToken::Iterator lhs = token1.begin();
+
+  CordzInfo::TrackCord(cord2.data, kTrackCordMethod);
+  CordzInfo* info2 = cord2.data.cordz_info();
+
+  CordzSampleToken token2;
+  // rhs starts with the CordzInfo corresponding to cord2 at the head.
+  CordzSampleToken::Iterator rhs = token2.begin();
+
+  CordzInfo::TrackCord(cord3.data, kTrackCordMethod);
+  CordzInfo* info3 = cord3.data.cordz_info();
+
+  // lhs is on cord1 while rhs is on cord2.
+  EXPECT_THAT(lhs, Ne(rhs));
+
+  rhs++;
+  // lhs and rhs are both on cord1, but they didn't come from the same
+  // CordzSampleToken.
+  EXPECT_THAT(lhs, Ne(rhs));
+
+  lhs++;
+  rhs++;
+  // Both lhs and rhs are done, so they are on nullptr.
+  EXPECT_THAT(lhs, Eq(rhs));
+
+  info1->Untrack();
+  info2->Untrack();
+  info3->Untrack();
+}
+
+TEST(CordzSampleTokenTest, MultiThreaded) {
+  Notification stop;
+  static constexpr int kNumThreads = 4;
+  static constexpr int kNumCords = 3;
+  static constexpr int kNumTokens = 3;
+  absl::synchronization_internal::ThreadPool pool(kNumThreads);
+
+  for (int i = 0; i < kNumThreads; ++i) {
+    pool.Schedule([&stop]() {
+      absl::BitGen gen;
+      TestCordData cords[kNumCords];
+      std::unique_ptr<CordzSampleToken> tokens[kNumTokens];
+
+      while (!stop.HasBeenNotified()) {
+        // Randomly perform one of five actions:
+        //   1) Untrack
+        //   2) Track
+        //   3) Iterate over Cords visible to a token.
+        //   4) Unsample
+        //   5) Sample
+        int index = absl::Uniform(gen, 0, kNumCords);
+        if (absl::Bernoulli(gen, 0.5)) {
+          TestCordData& cord = cords[index];
+          // Track/untrack.
+          if (cord.data.is_profiled()) {
+            // 1) Untrack
+            cord.data.cordz_info()->Untrack();
+            cord.data.clear_cordz_info();
+          } else {
+            // 2) Track
+            CordzInfo::TrackCord(cord.data, kTrackCordMethod);
+          }
+        } else {
+          std::unique_ptr<CordzSampleToken>& token = tokens[index];
+          if (token) {
+            if (absl::Bernoulli(gen, 0.5)) {
+              // 3) Iterate over Cords visible to a token.
+              for (const CordzInfo& info : *token) {
+                // This is trivial work to allow us to compile the loop.
+                EXPECT_THAT(info.Next(*token), Ne(&info));
+              }
+            } else {
+              // 4) Unsample
+              token = nullptr;
+            }
+          } else {
+            // 5) Sample
+            token = absl::make_unique<CordzSampleToken>();
+          }
+        }
+      }
+      for (TestCordData& cord : cords) {
+        CordzInfo::MaybeUntrackCord(cord.data.cordz_info());
+      }
+    });
+  }
+  // The threads will hammer away.  Give it a little bit of time for tsan to
+  // spot errors.
+  absl::SleepFor(absl::Seconds(3));
+  stop.Notify();
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_statistics.h b/abseil-cpp/absl/strings/internal/cordz_statistics.h
new file mode 100644
index 0000000..9f558df
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_statistics.h
@@ -0,0 +1,88 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzStatistics captures some meta information about a Cord's shape.
+struct CordzStatistics {
+  using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
+
+  // Node counts information
+  struct NodeCounts {
+    size_t flat = 0;       // #flats
+    size_t flat_64 = 0;    // #flats up to 64 bytes
+    size_t flat_128 = 0;   // #flats up to 128 bytes
+    size_t flat_256 = 0;   // #flats up to 256 bytes
+    size_t flat_512 = 0;   // #flats up to 512 bytes
+    size_t flat_1k = 0;    // #flats up to 1K bytes
+    size_t external = 0;   // #external reps
+    size_t substring = 0;  // #substring reps
+    size_t concat = 0;     // #concat reps
+    size_t ring = 0;       // #ring buffer reps
+    size_t btree = 0;      // #btree reps
+    size_t crc = 0;        // #crc reps
+  };
+
+  // The size of the cord in bytes. This matches the result of Cord::size().
+  size_t size = 0;
+
+  // The estimated memory used by the sampled cord. This value matches the
+  // value as reported by Cord::EstimatedMemoryUsage().
+  // A value of 0 implies the property has not been recorded.
+  size_t estimated_memory_usage = 0;
+
+  // The effective memory used by the sampled cord, inversely weighted by the
+  // effective indegree of each allocated node. This is a representation of the
+  // fair share of memory usage that should be attributed to the sampled cord.
+  // This value is more useful for cases where one or more nodes are referenced
+  // by multiple Cord instances, and for cases where a Cord includes the same
+  // node multiple times (either directly or indirectly).
+  // A value of 0 implies the property has not been recorded.
+  size_t estimated_fair_share_memory_usage = 0;
+
+  // The total number of nodes referenced by this cord.
+  // For ring buffer Cords, this includes the 'ring buffer' node.
+  // For btree Cords, this includes all 'CordRepBtree' tree nodes as well as all
+  // the substring, flat and external nodes referenced by the tree.
+  // A value of 0 implies the property has not been recorded.
+  size_t node_count = 0;
+
+  // Detailed node counts per type
+  NodeCounts node_counts;
+
+  // The cord method responsible for sampling the cord.
+  MethodIdentifier method = MethodIdentifier::kUnknown;
+
+  // The cord method responsible for sampling the parent cord if applicable.
+  MethodIdentifier parent_method = MethodIdentifier::kUnknown;
+
+  // Update tracker tracking invocation count per cord method.
+  CordzUpdateTracker update_tracker;
+};
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_
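
For orientation, a sketch of how these fields are read for a sampled cord, mirroring the GetStatistics test earlier in this change; the helper name ReportSampledCord is illustrative.

#include <cstddef>
#include <cstdint>

#include "absl/strings/internal/cordz_info.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"

namespace ci = absl::cord_internal;

// Returns the sampled cord's size; `info` must refer to a tracked cord.
size_t ReportSampledCord(ci::CordzInfo* info) {
  ci::CordzStatistics stats = info->GetCordzStatistics();
  // stats.size matches Cord::size(); zeros in the estimated_* fields mean the
  // property was not recorded.
  int64_t ctor_calls =
      stats.update_tracker.Value(ci::CordzUpdateTracker::kConstructorString);
  (void)ctor_calls;
  return stats.size;
}
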
diff --git a/abseil-cpp/absl/strings/internal/cordz_update_scope.h b/abseil-cpp/absl/strings/internal/cordz_update_scope.h
new file mode 100644
index 0000000..57ba75d
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_update_scope.h
@@ -0,0 +1,71 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
+
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzUpdateScope scopes an update to the provided CordzInfo.
+// The class invokes `info->Lock(method)` and `info->Unlock()` to guard
+// cordrep updates. This class does nothing if `info` is null.
+// See also the 'Lock`, `Unlock` and `SetCordRep` methods in `CordzInfo`.
+class ABSL_SCOPED_LOCKABLE CordzUpdateScope {
+ public:
+  CordzUpdateScope(CordzInfo* info, CordzUpdateTracker::MethodIdentifier method)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(info)
+      : info_(info) {
+    if (ABSL_PREDICT_FALSE(info_)) {
+      info->Lock(method);
+    }
+  }
+
+  // CordzUpdateScope can not be copied or assigned to.
+  CordzUpdateScope(CordzUpdateScope&& rhs) = delete;
+  CordzUpdateScope(const CordzUpdateScope&) = delete;
+  CordzUpdateScope& operator=(CordzUpdateScope&& rhs) = delete;
+  CordzUpdateScope& operator=(const CordzUpdateScope&) = delete;
+
+  ~CordzUpdateScope() ABSL_UNLOCK_FUNCTION() {
+    if (ABSL_PREDICT_FALSE(info_)) {
+      info_->Unlock();
+    }
+  }
+
+  void SetCordRep(CordRep* rep) const {
+    if (ABSL_PREDICT_FALSE(info_)) {
+      info_->SetCordRep(rep);
+    }
+  }
+
+  CordzInfo* info() const { return info_; }
+
+ private:
+  CordzInfo* info_;
+};
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
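
A sketch of the usage pattern this class is designed for. The function name and the elided mutation are illustrative; the CordzUpdateScope and CordzUpdateTracker calls are the real API shown above.

#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cordz_info.h"
#include "absl/strings/internal/cordz_update_scope.h"
#include "absl/strings/internal/cordz_update_tracker.h"

namespace ci = absl::cord_internal;

// Guards a cordrep mutation so a sampled cord's CordzInfo stays consistent.
void ReplaceRep(ci::InlineData& data, ci::CordRep* rep) {
  ci::CordzInfo* info = data.is_profiled() ? data.cordz_info() : nullptr;
  // Calls info->Lock(kAssignCord) if info is non-null; unlocks on scope exit.
  ci::CordzUpdateScope scope(info, ci::CordzUpdateTracker::kAssignCord);
  // ... swap the cord's rep to `rep` here (details elided) ...
  scope.SetCordRep(rep);  // no-op when info == nullptr
}
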
diff --git a/abseil-cpp/absl/strings/internal/cordz_update_scope_test.cc b/abseil-cpp/absl/strings/internal/cordz_update_scope_test.cc
new file mode 100644
index 0000000..3d08c62
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_update_scope_test.cc
@@ -0,0 +1,49 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_update_scope.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/strings/cordz_test_helpers.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cordz_info.h"
+#include "absl/strings/internal/cordz_update_tracker.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+// Used test values
+auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;
+
+TEST(CordzUpdateScopeTest, ScopeNullptr) {
+  CordzUpdateScope scope(nullptr, kTrackCordMethod);
+}
+
+TEST(CordzUpdateScopeTest, ScopeSampledCord) {
+  TestCordData cord;
+  CordzInfo::TrackCord(cord.data, kTrackCordMethod);
+  CordzUpdateScope scope(cord.data.cordz_info(), kTrackCordMethod);
+  cord.data.cordz_info()->SetCordRep(nullptr);
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/cordz_update_tracker.h b/abseil-cpp/absl/strings/internal/cordz_update_tracker.h
new file mode 100644
index 0000000..c517066
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_update_tracker.h
@@ -0,0 +1,123 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzUpdateTracker tracks counters for Cord update methods.
+//
+// The purpose of CordzUpdateTracker is to track the number of calls to methods
+// updating Cord data for sampled cords. The class internally uses 'lossy'
+// atomic operations: Cord is thread-compatible, so there is no need to
+// synchronize updates. However, Cordz collection threads may call 'Value()' at
+// any point, so the class needs to provide thread safe access.
+//
+// This class is thread-safe. However, as noted above, all non-const methods
+// should be used from a single thread only: updates are thread-safe but lossy.
+class CordzUpdateTracker {
+ public:
+  // Tracked update methods.
+  enum MethodIdentifier {
+    kUnknown,
+    kAppendCord,
+    kAppendCordBuffer,
+    kAppendExternalMemory,
+    kAppendString,
+    kAssignCord,
+    kAssignString,
+    kClear,
+    kConstructorCord,
+    kConstructorString,
+    kCordReader,
+    kFlatten,
+    kGetAppendBuffer,
+    kGetAppendRegion,
+    kMakeCordFromExternal,
+    kMoveAppendCord,
+    kMoveAssignCord,
+    kMovePrependCord,
+    kPrependCord,
+    kPrependCordBuffer,
+    kPrependString,
+    kRemovePrefix,
+    kRemoveSuffix,
+    kSetExpectedChecksum,
+    kSubCord,
+
+    // kNumMethods defines the number of entries: must be the last entry.
+    kNumMethods,
+  };
+
+  // Constructs a new instance. All counters are zero-initialized.
+  constexpr CordzUpdateTracker() noexcept : values_{} {}
+
+  // Copy constructs a new instance.
+  CordzUpdateTracker(const CordzUpdateTracker& rhs) noexcept { *this = rhs; }
+
+  // Assigns the provided value to this instance.
+  CordzUpdateTracker& operator=(const CordzUpdateTracker& rhs) noexcept {
+    for (int i = 0; i < kNumMethods; ++i) {
+      values_[i].store(rhs.values_[i].load(std::memory_order_relaxed),
+                       std::memory_order_relaxed);
+    }
+    return *this;
+  }
+
+  // Returns the value for the specified method.
+  int64_t Value(MethodIdentifier method) const {
+    return values_[method].load(std::memory_order_relaxed);
+  }
+
+  // Increases the value for the specified method by `n`
+  void LossyAdd(MethodIdentifier method, int64_t n = 1) {
+    auto& value = values_[method];
+    value.store(value.load(std::memory_order_relaxed) + n,
+                std::memory_order_relaxed);
+  }
+
+  // Adds all the values from `src` to this instance
+  void LossyAdd(const CordzUpdateTracker& src) {
+    for (int i = 0; i < kNumMethods; ++i) {
+      MethodIdentifier method = static_cast<MethodIdentifier>(i);
+      if (int64_t value = src.Value(method)) {
+        LossyAdd(method, value);
+      }
+    }
+  }
+
+ private:
+  // Until C++20 std::atomic is not constexpr default-constructible, so we need
+  // a wrapper for this class to be constexpr constructible.
+  class Counter : public std::atomic<int64_t> {
+   public:
+    constexpr Counter() noexcept : std::atomic<int64_t>(0) {}
+  };
+
+  Counter values_[kNumMethods];
+};
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
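
A minimal sketch of the counter contract described above; the Example function is illustrative only.

#include <cstdint>

#include "absl/strings/internal/cordz_update_tracker.h"

namespace ci = absl::cord_internal;

void Example() {
  ci::CordzUpdateTracker tracker;
  // The owning (single) writer thread bumps counters with LossyAdd(); there is
  // no read-modify-write atomicity, only torn-free loads and stores.
  tracker.LossyAdd(ci::CordzUpdateTracker::kAppendString);
  tracker.LossyAdd(ci::CordzUpdateTracker::kAppendString, 2);
  // Any thread may read the value at any time.
  int64_t appends = tracker.Value(ci::CordzUpdateTracker::kAppendString);  // 3
  (void)appends;
}
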
diff --git a/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc b/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc
new file mode 100644
index 0000000..9b1f798
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc
@@ -0,0 +1,147 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cordz_update_tracker.h"
+
+#include <array>
+#include <thread>  // NOLINT
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/synchronization/notification.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::testing::AnyOf;
+using ::testing::Eq;
+
+using Method = CordzUpdateTracker::MethodIdentifier;
+using Methods = std::array<Method, Method::kNumMethods>;
+
+// Returns an array of all methods defined in `MethodIdentifier`
+Methods AllMethods() {
+  return Methods{Method::kUnknown,
+                 Method::kAppendCord,
+                 Method::kAppendCordBuffer,
+                 Method::kAppendExternalMemory,
+                 Method::kAppendString,
+                 Method::kAssignCord,
+                 Method::kAssignString,
+                 Method::kClear,
+                 Method::kConstructorCord,
+                 Method::kConstructorString,
+                 Method::kCordReader,
+                 Method::kFlatten,
+                 Method::kGetAppendBuffer,
+                 Method::kGetAppendRegion,
+                 Method::kMakeCordFromExternal,
+                 Method::kMoveAppendCord,
+                 Method::kMoveAssignCord,
+                 Method::kMovePrependCord,
+                 Method::kPrependCord,
+                 Method::kPrependCordBuffer,
+                 Method::kPrependString,
+                 Method::kRemovePrefix,
+                 Method::kRemoveSuffix,
+                 Method::kSetExpectedChecksum,
+                 Method::kSubCord};
+}
+
+TEST(CordzUpdateTracker, IsConstExprAndInitializesToZero) {
+  constexpr CordzUpdateTracker tracker;
+  for (Method method : AllMethods()) {
+    ASSERT_THAT(tracker.Value(method), Eq(0));
+  }
+}
+
+TEST(CordzUpdateTracker, LossyAdd) {
+  int64_t n = 1;
+  CordzUpdateTracker tracker;
+  for (Method method : AllMethods()) {
+    tracker.LossyAdd(method, n);
+    EXPECT_THAT(tracker.Value(method), Eq(n));
+    n += 2;
+  }
+}
+
+TEST(CordzUpdateTracker, CopyConstructor) {
+  int64_t n = 1;
+  CordzUpdateTracker src;
+  for (Method method : AllMethods()) {
+    src.LossyAdd(method, n);
+    n += 2;
+  }
+
+  n = 1;
+  CordzUpdateTracker tracker(src);
+  for (Method method : AllMethods()) {
+    EXPECT_THAT(tracker.Value(method), Eq(n));
+    n += 2;
+  }
+}
+
+TEST(CordzUpdateTracker, OperatorAssign) {
+  int64_t n = 1;
+  CordzUpdateTracker src;
+  CordzUpdateTracker tracker;
+  for (Method method : AllMethods()) {
+    src.LossyAdd(method, n);
+    n += 2;
+  }
+
+  n = 1;
+  tracker = src;
+  for (Method method : AllMethods()) {
+    EXPECT_THAT(tracker.Value(method), Eq(n));
+    n += 2;
+  }
+}
+
+TEST(CordzUpdateTracker, ThreadSanitizedValueCheck) {
+  absl::Notification done;
+  CordzUpdateTracker tracker;
+
+  std::thread reader([&done, &tracker] {
+    while (!done.HasBeenNotified()) {
+      int n = 1;
+      for (Method method : AllMethods()) {
+        EXPECT_THAT(tracker.Value(method), AnyOf(Eq(n), Eq(0)));
+        n += 2;
+      }
+    }
+    int n = 1;
+    for (Method method : AllMethods()) {
+      EXPECT_THAT(tracker.Value(method), Eq(n));
+      n += 2;
+    }
+  });
+
+  int64_t n = 1;
+  for (Method method : AllMethods()) {
+    tracker.LossyAdd(method, n);
+    n += 2;
+  }
+  done.Notify();
+  reader.join();
+}
+
+}  // namespace
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc b/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc
new file mode 100644
index 0000000..a084568
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc
@@ -0,0 +1,93 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/damerau_levenshtein_distance.h"
+
+#include <algorithm>
+#include <array>
+#include <numeric>
+
+#include "absl/strings/string_view.h"
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+// Calculate DamerauLevenshtein (adjacent transpositions) distance
+// between two strings,
+// https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance. The
+// algorithm follows the condition that no substring is edited more than once.
+// While this can result in a larger distance, it is a) a much simpler
+// algorithm and b) more realistic for the case where typographic mistakes
+// should be detected.
+// When the distance is larger than cutoff, or one of the strings has more
+// than MAX_SIZE=100 characters, the code returns min(MAX_SIZE, cutoff) + 1.
+uint8_t CappedDamerauLevenshteinDistance(absl::string_view s1,
+                                         absl::string_view s2, uint8_t cutoff) {
+  const uint8_t MAX_SIZE = 100;
+  const uint8_t _cutoff = std::min(MAX_SIZE, cutoff);
+  const uint8_t cutoff_plus_1 = static_cast<uint8_t>(_cutoff + 1);
+
+  if (s1.size() > s2.size()) std::swap(s1, s2);
+  if (s1.size() + _cutoff < s2.size() || s2.size() > MAX_SIZE)
+    return cutoff_plus_1;
+
+  if (s1.empty())
+    return static_cast<uint8_t>(s2.size());
+
+  // Lower diagonal bound: y = x - lower_diag
+  const uint8_t lower_diag =
+      _cutoff - static_cast<uint8_t>(s2.size() - s1.size());
+  // Upper diagonal bound: y = x + upper_diag
+  const uint8_t upper_diag = _cutoff;
+
+  // d[i][j] is the number of edits required to convert s1[0, i] to s2[0, j]
+  std::array<std::array<uint8_t, MAX_SIZE + 2>, MAX_SIZE + 2> d;
+  std::iota(d[0].begin(), d[0].begin() + upper_diag + 1, 0);
+  d[0][cutoff_plus_1] = cutoff_plus_1;
+  for (size_t i = 1; i <= s1.size(); ++i) {
+    // Deduce begin of relevant window.
+    size_t j_begin = 1;
+    if (i > lower_diag) {
+      j_begin = i - lower_diag;
+      d[i][j_begin - 1] = cutoff_plus_1;
+    } else {
+      d[i][0] = static_cast<uint8_t>(i);
+    }
+
+    // Deduce end of relevant window.
+    size_t j_end = i + upper_diag;
+    if (j_end > s2.size()) {
+      j_end = s2.size();
+    } else {
+      d[i][j_end + 1] = cutoff_plus_1;
+    }
+
+    for (size_t j = j_begin; j <= j_end; ++j) {
+      const uint8_t deletion_distance = d[i - 1][j] + 1;
+      const uint8_t insertion_distance = d[i][j - 1] + 1;
+      const uint8_t mismatched_tail_cost = s1[i - 1] == s2[j - 1] ? 0 : 1;
+      const uint8_t mismatch_distance = d[i - 1][j - 1] + mismatched_tail_cost;
+      uint8_t transposition_distance = _cutoff + 1;
+      if (i > 1 && j > 1 && s1[i - 1] == s2[j - 2] && s1[i - 2] == s2[j - 1])
+        transposition_distance = d[i - 2][j - 2] + 1;
+      d[i][j] = std::min({cutoff_plus_1, deletion_distance, insertion_distance,
+                          mismatch_distance, transposition_distance});
+    }
+  }
+  return d[s1.size()][s2.size()];
+}
+
+}  // namespace strings_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
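
A usage sketch mirroring the tests that follow; it also shows the effect of the "no substring edited twice" restriction mentioned above. The Demo function is illustrative only.

#include <cstdint>

#include "absl/strings/internal/damerau_levenshtein_distance.h"

void Demo() {
  using absl::strings_internal::CappedDamerauLevenshteinDistance;
  uint8_t d1 = CappedDamerauLevenshteinDistance("abcd", "ad", 6);  // 2 deletions
  // Unrestricted Damerau-Levenshtein gives 2 here (transpose, then insert),
  // but the restricted algorithm never edits a substring twice, so it is 3.
  uint8_t d2 = CappedDamerauLevenshteinDistance("ca", "abc", 6);
  // Distances beyond the cutoff are reported as cutoff + 1.
  uint8_t d3 = CappedDamerauLevenshteinDistance("abcd", "a", 2);  // 3
  (void)d1; (void)d2; (void)d3;
}
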
diff --git a/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h b/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h
new file mode 100644
index 0000000..7a4bd64
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h
@@ -0,0 +1,34 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_
+#define ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_
+
+#include <cstdint>
+
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+// Calculate DamerauLevenshtein distance between two strings.
+// When the distance is larger than cutoff, the code just returns cutoff + 1.
+uint8_t CappedDamerauLevenshteinDistance(absl::string_view s1,
+                                         absl::string_view s2, uint8_t cutoff);
+
+}  // namespace strings_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_
diff --git a/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc b/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc
new file mode 100644
index 0000000..49dd105
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/damerau_levenshtein_distance.h"
+
+#include <cstdint>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::strings_internal::CappedDamerauLevenshteinDistance;
+
+TEST(Distance, TestDistances) {
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("ab", "ab", 6), uint8_t{0});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("a", "b", 6), uint8_t{1});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("ca", "abc", 6), uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "ad", 6), uint8_t{2});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "cadb", 6), uint8_t{4});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "bdac", 6), uint8_t{4});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("ab", "ab", 0), uint8_t{0});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("", "", 0), uint8_t{0});
+  // combinations for 3-character strings:
+  // 1, 2, 3 removals, insertions or replacements and transpositions
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", "abc", 6), uint8_t{0});
+  for (auto res :
+       {"", "ca", "efg", "ea", "ce", "ceb", "eca", "cae", "cea", "bea"}) {
+    EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{3});
+    EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{3});
+  }
+  for (auto res :
+       {"a",   "b",   "c",   "ba",  "cb",  "bca", "cab", "cba", "ace",
+        "efc", "ebf", "aef", "ae",  "be",  "eb",  "ec",  "ecb", "bec",
+        "bce", "cbe", "ace", "eac", "aeb", "bae", "eab", "eba"}) {
+    EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{2});
+    EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{2});
+  }
+  for (auto res : {"ab", "ac", "bc", "acb", "bac", "ebc", "aec", "abe"}) {
+    EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{1});
+    EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{1});
+  }
+}
+
+TEST(Distance, TestCutoff) {
+  // Returns cutoff + 1 if the distance is larger than cutoff or either string
+  // is longer than MAX_SIZE.
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 3), uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 2), uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 1), uint8_t{2});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("abcdefg", "a", 2), uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance("a", "abcde", 2), uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(102, 'a'),
+                                               std::string(102, 'a'), 105),
+              uint8_t{101});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
+                                               std::string(100, 'a'), 100),
+              uint8_t{0});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
+                                               std::string(100, 'b'), 100),
+              uint8_t{100});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
+                                               std::string(99, 'a'), 2),
+              uint8_t{1});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
+                                               std::string(101, 'a'), 2),
+              uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
+                                               std::string(101, 'a'), 2),
+              uint8_t{3});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX + 1, 'a'),
+                                               std::string(UINT8_MAX + 1, 'b'),
+                                               UINT8_MAX),
+              uint8_t{101});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX - 1, 'a'),
+                                               std::string(UINT8_MAX - 1, 'b'),
+                                               UINT8_MAX),
+              uint8_t{101});
+  EXPECT_THAT(
+      CappedDamerauLevenshteinDistance(std::string(UINT8_MAX, 'a'),
+                                       std::string(UINT8_MAX, 'b'), UINT8_MAX),
+      uint8_t{101});
+  EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX - 1, 'a'),
+                                               std::string(UINT8_MAX - 1, 'a'),
+                                               UINT8_MAX),
+              uint8_t{101});
+}
+}  // namespace
diff --git a/abseil-cpp/absl/strings/internal/escaping.cc b/abseil-cpp/absl/strings/internal/escaping.cc
index c527128..56a4cbe 100644
--- a/abseil-cpp/absl/strings/internal/escaping.cc
+++ b/abseil-cpp/absl/strings/internal/escaping.cc
@@ -21,26 +21,26 @@
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
 
-const char kBase64Chars[] =
+// The two strings below map each 6-bit value to its base64-escaped character
+// equivalent.
+// For the inverse case, see kUn(WebSafe)Base64 in the external
+// escaping.cc.
+ABSL_CONST_INIT const char kBase64Chars[] =
     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
 
+ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+
 size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
   // Base64 encodes three bytes of input at a time. If the input is not
   // divisible by three, we pad as appropriate.
   //
-  // (from https://tools.ietf.org/html/rfc3548)
-  // Special processing is performed if fewer than 24 bits are available
-  // at the end of the data being encoded.  A full encoding quantum is
-  // always completed at the end of a quantity.  When fewer than 24 input
-  // bits are available in an input group, zero bits are added (on the
-  // right) to form an integral number of 6-bit groups.  Padding at the
-  // end of the data is performed using the '=' character.  Since all base
-  // 64 input is an integral number of octets, only the following cases
-  // can arise:
-
   // Base64 encodes each three bytes of input into four bytes of output.
   size_t len = (input_len / 3) * 4;
 
+  // Since all base 64 input is an integral number of octets, only the following
+  // cases can arise:
   if (input_len % 3 == 0) {
     // (from https://tools.ietf.org/html/rfc3548)
     // (1) the final quantum of encoding input is an integral multiple of 24
@@ -70,6 +70,21 @@
   return len;
 }
 
+// ----------------------------------------------------------------------
+//   Take the input in groups of 4 characters and turn each
+//   character into a code 0 to 63 thus:
+//           A-Z map to 0 to 25
+//           a-z map to 26 to 51
+//           0-9 map to 52 to 61
+//           +(- for WebSafe) maps to 62
+//           /(_ for WebSafe) maps to 63
+//   There will be four numbers, all less than 64 which can be represented
+//   by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+//   Arrange the 6 digit binary numbers into three bytes as such:
+//   aaaaaabb bbbbcccc ccdddddd
+//   Equals signs (one or two) are used at the end of the encoded block to
+//   indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
 size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
                             size_t szdest, const char* base64,
                             bool do_padding) {
@@ -83,6 +98,16 @@
   char* const limit_dest = dest + szdest;
   const unsigned char* const limit_src = src + szsrc;
 
+  // (from https://tools.ietf.org/html/rfc3548)
+  // Special processing is performed if fewer than 24 bits are available
+  // at the end of the data being encoded.  A full encoding quantum is
+  // always completed at the end of a quantity.  When fewer than 24 input
+  // bits are available in an input group, zero bits are added (on the
+  // right) to form an integral number of 6-bit groups.
+  //
+  // If do_padding is true, padding at the end of the data is performed. This
+  // output padding uses the '=' character.
+
   // Three bytes of data encodes to four characters of cyphertext.
   // So we can pump through three-byte chunks atomically.
   if (szsrc >= 3) {                    // "limit_src - 3" is UB if szsrc < 3.
@@ -102,8 +127,8 @@
     }
   }
   // To save time, we didn't update szdest or szsrc in the loop.  So do it now.
-  szdest = limit_dest - cur_dest;
-  szsrc = limit_src - cur_src;
+  szdest = static_cast<size_t>(limit_dest - cur_dest);
+  szsrc = static_cast<size_t>(limit_src - cur_src);
 
   /* now deal with the tail (<=3 bytes) */
   switch (szsrc) {
@@ -154,7 +179,8 @@
       // the loop because the loop above always reads 4 bytes, and the fourth
       // byte is past the end of the input.
       if (szdest < 4) return 0;
-      uint32_t in = (cur_src[0] << 16) + absl::big_endian::Load16(cur_src + 1);
+      uint32_t in =
+          (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
       cur_dest[0] = base64[in >> 18];
       in &= 0x3FFFF;
       cur_dest[1] = base64[in >> 12];
@@ -172,7 +198,7 @@
       ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
       break;
   }
-  return (cur_dest - dest);
+  return static_cast<size_t>(cur_dest - dest);
 }
 
 }  // namespace strings_internal
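
A worked example of the bit regrouping described in the comment above; the helper name EncodeMan and the sample bytes are illustrative, the Base64EscapeInternal call is the real API from this file.

#include <cstddef>
#include <string>

#include "absl/strings/internal/escaping.h"

// 'M','a','n' = 01001101 01100001 01101110 regroups into the 6-bit values
// 010011|010110|000101|101110 = 19, 22, 5, 46, which index kBase64Chars as
// 'T', 'W', 'F', 'u'.
std::string EncodeMan() {
  const unsigned char src[] = {'M', 'a', 'n'};
  char out[8] = {};
  size_t n = absl::strings_internal::Base64EscapeInternal(
      src, sizeof(src), out, sizeof(out),
      absl::strings_internal::kBase64Chars, /*do_padding=*/true);
  return std::string(out, n);  // "TWFu"
}
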
diff --git a/abseil-cpp/absl/strings/internal/escaping.h b/abseil-cpp/absl/strings/internal/escaping.h
index 6a9ce60..2186f77 100644
--- a/abseil-cpp/absl/strings/internal/escaping.h
+++ b/abseil-cpp/absl/strings/internal/escaping.h
@@ -24,20 +24,19 @@
 namespace strings_internal {
 
 ABSL_CONST_INIT extern const char kBase64Chars[];
+ABSL_CONST_INIT extern const char kWebSafeBase64Chars[];
 
-// Calculates how long a string will be when it is base64 encoded given its
-// length and whether or not the result should be padded.
+// Calculates the length of a Base64 encoding (RFC 4648) of a string of length
+// `input_len`, with or without padding per `do_padding`. Note that 'web-safe'
+// encoding (section 5 of the RFC) does not change this length.
 size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding);
 
-// Base64-encodes `src` using the alphabet provided in `base64` and writes the
-// result to `dest`. If `do_padding` is true, `dest` is padded with '=' chars
-// until its length is a multiple of 3. Returns the length of `dest`.
+// Base64-encodes `src` using the alphabet provided in `base64` (which
+// determines whether to do web-safe encoding or not) and writes the result to
+// `dest`. If `do_padding` is true, `dest` is padded with '=' chars until its
+// length is a multiple of 4. Returns the length of `dest`.
 size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
                             size_t szdest, const char* base64, bool do_padding);
-
-// Base64-encodes `src` using the alphabet provided in `base64` and writes the
-// result to `dest`. If `do_padding` is true, `dest` is padded with '=' chars
-// until its length is a multiple of 3.
 template <typename String>
 void Base64EscapeInternal(const unsigned char* src, size_t szsrc, String* dest,
                           bool do_padding, const char* base64_chars) {
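
As a sanity sketch of the length contract documented above (not part of this change): with padding the encoded length is always 4 * ceil(input_len / 3), and the web-safe alphabet does not change it.

#include <cstddef>

#include "absl/strings/internal/escaping.h"

bool PaddedLengthMatchesClosedForm(size_t input_len) {
  size_t expected = 4 * ((input_len + 2) / 3);  // 4 * ceil(input_len / 3)
  return absl::strings_internal::CalculateBase64EscapedLenInternal(
             input_len, /*do_padding=*/true) == expected;
}
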
diff --git a/abseil-cpp/absl/strings/internal/has_absl_stringify.h b/abseil-cpp/absl/strings/internal/has_absl_stringify.h
new file mode 100644
index 0000000..55a0850
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/has_absl_stringify.h
@@ -0,0 +1,55 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_
+#define ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace strings_internal {
+
+// This is an empty class not intended to be used. It exists so that
+// `HasAbslStringify` can reference a universal class rather than needing to be
+// copied for each new sink.
+class UnimplementedSink {
+ public:
+  void Append(size_t count, char ch);
+
+  void Append(string_view v);
+
+  // Support `absl::Format(&sink, format, args...)`.
+  friend void AbslFormatFlush(UnimplementedSink* sink, absl::string_view v);
+};
+
+template <typename T, typename = void>
+struct HasAbslStringify : std::false_type {};
+
+template <typename T>
+struct HasAbslStringify<
+    T, std::enable_if_t<std::is_void<decltype(AbslStringify(
+           std::declval<strings_internal::UnimplementedSink&>(),
+           std::declval<const T&>()))>::value>> : std::true_type {};
+
+}  // namespace strings_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_
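
A small sketch of how the trait fires; the Point type is illustrative and not part of Abseil.

#include "absl/strings/internal/has_absl_stringify.h"

struct Point {
  int x = 0, y = 0;

  // Opting in: an ADL-visible AbslStringify overload taking a sink.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point&) {
    sink.Append("Point");  // formatting details elided
  }
};

static_assert(absl::strings_internal::HasAbslStringify<Point>::value,
              "Point provides AbslStringify, so the trait is true.");
static_assert(!absl::strings_internal::HasAbslStringify<int>::value,
              "int has no AbslStringify extension point.");
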
diff --git a/abseil-cpp/absl/strings/internal/memutil.cc b/abseil-cpp/absl/strings/internal/memutil.cc
index 2519c68..e2e7347 100644
--- a/abseil-cpp/absl/strings/internal/memutil.cc
+++ b/abseil-cpp/absl/strings/internal/memutil.cc
@@ -16,6 +16,8 @@
 
 #include <cstdlib>
 
+#include "absl/strings/ascii.h"
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
@@ -33,80 +35,6 @@
   return 0;
 }
 
-char* memdup(const char* s, size_t slen) {
-  void* copy;
-  if ((copy = malloc(slen)) == nullptr) return nullptr;
-  memcpy(copy, s, slen);
-  return reinterpret_cast<char*>(copy);
-}
-
-char* memrchr(const char* s, int c, size_t slen) {
-  for (const char* e = s + slen - 1; e >= s; e--) {
-    if (*e == c) return const_cast<char*>(e);
-  }
-  return nullptr;
-}
-
-size_t memspn(const char* s, size_t slen, const char* accept) {
-  const char* p = s;
-  const char* spanp;
-  char c, sc;
-
-cont:
-  c = *p++;
-  if (slen-- == 0) return p - 1 - s;
-  for (spanp = accept; (sc = *spanp++) != '\0';)
-    if (sc == c) goto cont;
-  return p - 1 - s;
-}
-
-size_t memcspn(const char* s, size_t slen, const char* reject) {
-  const char* p = s;
-  const char* spanp;
-  char c, sc;
-
-  while (slen-- != 0) {
-    c = *p++;
-    for (spanp = reject; (sc = *spanp++) != '\0';)
-      if (sc == c) return p - 1 - s;
-  }
-  return p - s;
-}
-
-char* mempbrk(const char* s, size_t slen, const char* accept) {
-  const char* scanp;
-  int sc;
-
-  for (; slen; ++s, --slen) {
-    for (scanp = accept; (sc = *scanp++) != '\0';)
-      if (sc == *s) return const_cast<char*>(s);
-  }
-  return nullptr;
-}
-
-// This is significantly faster for case-sensitive matches with very
-// few possible matches.  See unit test for benchmarks.
-const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
-                     size_t neelen) {
-  if (0 == neelen) {
-    return phaystack;  // even if haylen is 0
-  }
-  if (haylen < neelen) return nullptr;
-
-  const char* match;
-  const char* hayend = phaystack + haylen - neelen + 1;
-  // A static cast is used here to work around the fact that memchr returns
-  // a void* on Posix-compliant systems and const void* on Windows.
-  while ((match = static_cast<const char*>(
-              memchr(phaystack, pneedle[0], hayend - phaystack)))) {
-    if (memcmp(match, pneedle, neelen) == 0)
-      return match;
-    else
-      phaystack = match + 1;
-  }
-  return nullptr;
-}
-
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/memutil.h b/abseil-cpp/absl/strings/internal/memutil.h
index 9ad0535..b5911a0 100644
--- a/abseil-cpp/absl/strings/internal/memutil.h
+++ b/abseil-cpp/absl/strings/internal/memutil.h
@@ -14,51 +14,6 @@
 // limitations under the License.
 //
 
-// These routines provide mem versions of standard C string routines,
-// such as strpbrk.  They function exactly the same as the str versions,
-// so if you wonder what they are, replace the word "mem" by
-// "str" and check out the man page.  I could return void*, as the
-// strutil.h mem*() routines tend to do, but I return char* instead
-// since this is by far the most common way these functions are called.
-//
-// The difference between the mem and str versions is the mem version
-// takes a pointer and a length, rather than a '\0'-terminated string.
-// The memcase* routines defined here assume the locale is "C"
-// (they use absl::ascii_tolower instead of tolower).
-//
-// These routines are based on the BSD library.
-//
-// Here's a list of routines from string.h, and their mem analogues.
-// Functions in lowercase are defined in string.h; those in UPPERCASE
-// are defined here:
-//
-// strlen                  --
-// strcat strncat          MEMCAT
-// strcpy strncpy          memcpy
-// --                      memccpy   (very cool function, btw)
-// --                      memmove
-// --                      memset
-// strcmp strncmp          memcmp
-// strcasecmp strncasecmp  MEMCASECMP
-// strchr                  memchr
-// strcoll                 --
-// strxfrm                 --
-// strdup strndup          MEMDUP
-// strrchr                 MEMRCHR
-// strspn                  MEMSPN
-// strcspn                 MEMCSPN
-// strpbrk                 MEMPBRK
-// strstr                  MEMSTR MEMMEM
-// (g)strcasestr           MEMCASESTR MEMCASEMEM
-// strtok                  --
-// strprefix               MEMPREFIX      (strprefix is from strutil.h)
-// strcaseprefix           MEMCASEPREFIX  (strcaseprefix is from strutil.h)
-// strsuffix               MEMSUFFIX      (strsuffix is from strutil.h)
-// strcasesuffix           MEMCASESUFFIX  (strcasesuffix is from strutil.h)
-// --                      MEMIS
-// --                      MEMCASEIS
-// strcount                MEMCOUNT       (strcount is from strutil.h)
-
 #ifndef ABSL_STRINGS_INTERNAL_MEMUTIL_H_
 #define ABSL_STRINGS_INTERNAL_MEMUTIL_H_
 
@@ -72,74 +27,11 @@
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
 
-inline char* memcat(char* dest, size_t destlen, const char* src,
-                    size_t srclen) {
-  return reinterpret_cast<char*>(memcpy(dest + destlen, src, srclen));
-}
-
+// Performs a byte-by-byte comparison of `len` bytes of the strings `s1` and
+// `s2`, ignoring the case of the characters. It returns an integer less than,
+// equal to, or greater than zero if `s1` is found, respectively, to be less
+// than, to match, or be greater than `s2`.
 int memcasecmp(const char* s1, const char* s2, size_t len);
-char* memdup(const char* s, size_t slen);
-char* memrchr(const char* s, int c, size_t slen);
-size_t memspn(const char* s, size_t slen, const char* accept);
-size_t memcspn(const char* s, size_t slen, const char* reject);
-char* mempbrk(const char* s, size_t slen, const char* accept);
-
-// This is for internal use only.  Don't call this directly
-template <bool case_sensitive>
-const char* int_memmatch(const char* haystack, size_t haylen,
-                         const char* needle, size_t neelen) {
-  if (0 == neelen) {
-    return haystack;  // even if haylen is 0
-  }
-  const char* hayend = haystack + haylen;
-  const char* needlestart = needle;
-  const char* needleend = needlestart + neelen;
-
-  for (; haystack < hayend; ++haystack) {
-    char hay = case_sensitive
-                   ? *haystack
-                   : absl::ascii_tolower(static_cast<unsigned char>(*haystack));
-    char nee = case_sensitive
-                   ? *needle
-                   : absl::ascii_tolower(static_cast<unsigned char>(*needle));
-    if (hay == nee) {
-      if (++needle == needleend) {
-        return haystack + 1 - neelen;
-      }
-    } else if (needle != needlestart) {
-      // must back up haystack in case a prefix matched (find "aab" in "aaab")
-      haystack -= needle - needlestart;  // for loop will advance one more
-      needle = needlestart;
-    }
-  }
-  return nullptr;
-}
-
-// These are the guys you can call directly
-inline const char* memstr(const char* phaystack, size_t haylen,
-                          const char* pneedle) {
-  return int_memmatch<true>(phaystack, haylen, pneedle, strlen(pneedle));
-}
-
-inline const char* memcasestr(const char* phaystack, size_t haylen,
-                              const char* pneedle) {
-  return int_memmatch<false>(phaystack, haylen, pneedle, strlen(pneedle));
-}
-
-inline const char* memmem(const char* phaystack, size_t haylen,
-                          const char* pneedle, size_t needlelen) {
-  return int_memmatch<true>(phaystack, haylen, pneedle, needlelen);
-}
-
-inline const char* memcasemem(const char* phaystack, size_t haylen,
-                              const char* pneedle, size_t needlelen) {
-  return int_memmatch<false>(phaystack, haylen, pneedle, needlelen);
-}
-
-// This is significantly faster for case-sensitive matches with very
-// few possible matches.  See unit test for benchmarks.
-const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
-                     size_t neelen);
 
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
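
The trimmed header above now documents only memcasecmp. For reference, a minimal sketch of that documented contract, using a C-locale lowercase mapping as the original header comment described; this is an illustration only, not the library's implementation (which lives in memutil.cc and uses absl::ascii_tolower):

// Sketch only: the documented behavior of memcasecmp -- compare `len` bytes
// case-insensitively and return <0, 0, or >0 like memcmp.
#include <cstddef>

namespace sketch {
inline char AsciiToLower(char c) {
  return (c >= 'A' && c <= 'Z') ? static_cast<char>(c - 'A' + 'a') : c;
}
inline int MemCaseCmp(const char* s1, const char* s2, size_t len) {
  for (size_t i = 0; i < len; ++i) {
    const int diff = AsciiToLower(s1[i]) - AsciiToLower(s2[i]);
    if (diff != 0) return diff;  // sign says which side orders first
  }
  return 0;  // all `len` bytes are equal, ignoring ASCII case
}
}  // namespace sketch
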
diff --git a/abseil-cpp/absl/strings/internal/memutil_benchmark.cc b/abseil-cpp/absl/strings/internal/memutil_benchmark.cc
index dc95c3e..61e323a 100644
--- a/abseil-cpp/absl/strings/internal/memutil_benchmark.cc
+++ b/abseil-cpp/absl/strings/internal/memutil_benchmark.cc
@@ -25,62 +25,6 @@
 // - an easy search: 'b'
 // - a medium search: 'ab'.  That means every letter is a possible match.
 // - a pathological search: 'aaaaaa.......aaaaab' (half as many a's as haystack)
-// We benchmark case-sensitive and case-insensitive versions of
-// three memmem implementations:
-// - memmem() from memutil.h
-// - search() from STL
-// - memmatch(), a custom implementation using memchr and memcmp.
-// Here are sample results:
-//
-// Run on (12 X 3800 MHz CPU s)
-// CPU Caches:
-//   L1 Data 32K (x6)
-//   L1 Instruction 32K (x6)
-//   L2 Unified 256K (x6)
-//   L3 Unified 15360K (x1)
-// ----------------------------------------------------------------
-// Benchmark                           Time          CPU Iterations
-// ----------------------------------------------------------------
-// BM_Memmem                        3583 ns      3582 ns     196469  2.59966GB/s
-// BM_MemmemMedium                 13743 ns     13742 ns      50901  693.986MB/s
-// BM_MemmemPathological        13695030 ns  13693977 ns         51  713.133kB/s
-// BM_Memcasemem                    3299 ns      3299 ns     212942  2.82309GB/s
-// BM_MemcasememMedium             16407 ns     16406 ns      42170  581.309MB/s
-// BM_MemcasememPathological    17267745 ns  17266030 ns         41  565.598kB/s
-// BM_Search                        1610 ns      1609 ns     431321  5.78672GB/s
-// BM_SearchMedium                 11111 ns     11110 ns      63001  858.414MB/s
-// BM_SearchPathological        12117390 ns  12116397 ns         58  805.984kB/s
-// BM_Searchcase                    3081 ns      3081 ns     229949  3.02313GB/s
-// BM_SearchcaseMedium             16003 ns     16001 ns      44170  595.998MB/s
-// BM_SearchcasePathological    15823413 ns  15821909 ns         44  617.222kB/s
-// BM_Memmatch                       197 ns       197 ns    3584225  47.2951GB/s
-// BM_MemmatchMedium               52333 ns     52329 ns      13280  182.244MB/s
-// BM_MemmatchPathological        659799 ns    659727 ns       1058  14.4556MB/s
-// BM_Memcasematch                  5460 ns      5460 ns     127606  1.70586GB/s
-// BM_MemcasematchMedium           32861 ns     32857 ns      21258  290.248MB/s
-// BM_MemcasematchPathological  15154243 ns  15153089 ns         46  644.464kB/s
-// BM_MemmemStartup                    5 ns         5 ns  150821500
-// BM_SearchStartup                    5 ns         5 ns  150644203
-// BM_MemmatchStartup                  7 ns         7 ns   97068802
-//
-// Conclusions:
-//
-// The following recommendations are based on the sample results above. However,
-// we have found that the performance of STL search can vary significantly
-// depending on compiler and standard library implementation. We recommend you
-// run the benchmarks for yourself on relevant platforms.
-//
-// If you need case-insensitive, STL search is slightly better than memmem for
-// all cases.
-//
-// Case-sensitive is more subtle:
-// Custom memmatch is _very_ fast at scanning, so if you have very few possible
-// matches in your haystack, that's the way to go. Performance drops
-// significantly with more matches.
-//
-// STL search is slightly faster than memmem in the medium and pathological
-// benchmarks. However, the performance of memmem is currently more dependable
-// across platforms and build configurations.
 
 namespace {
 
@@ -94,96 +38,10 @@
 }
 const char* const kHaystack = MakeHaystack();
 
-void BM_Memmem(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        absl::strings_internal::memmem(kHaystack, kHaystackSize, "b", 1));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Memmem);
-
-void BM_MemmemMedium(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        absl::strings_internal::memmem(kHaystack, kHaystackSize, "ab", 2));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmemMedium);
-
-void BM_MemmemPathological(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(absl::strings_internal::memmem(
-        kHaystack, kHaystackSize, kHaystack + kHaystackSize / 2,
-        kHaystackSize - kHaystackSize / 2));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmemPathological);
-
-void BM_Memcasemem(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        absl::strings_internal::memcasemem(kHaystack, kHaystackSize, "b", 1));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Memcasemem);
-
-void BM_MemcasememMedium(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        absl::strings_internal::memcasemem(kHaystack, kHaystackSize, "ab", 2));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemcasememMedium);
-
-void BM_MemcasememPathological(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(absl::strings_internal::memcasemem(
-        kHaystack, kHaystackSize, kHaystack + kHaystackSize / 2,
-        kHaystackSize - kHaystackSize / 2));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemcasememPathological);
-
 bool case_eq(const char a, const char b) {
   return absl::ascii_tolower(a) == absl::ascii_tolower(b);
 }
 
-void BM_Search(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
-                                         kHaystack + kHaystackSize - 1,
-                                         kHaystack + kHaystackSize));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Search);
-
-void BM_SearchMedium(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
-                                         kHaystack + kHaystackSize - 2,
-                                         kHaystack + kHaystackSize));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_SearchMedium);
-
-void BM_SearchPathological(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
-                                         kHaystack + kHaystackSize / 2,
-                                         kHaystack + kHaystackSize));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_SearchPathological);
-
 void BM_Searchcase(benchmark::State& state) {
   for (auto _ : state) {
     benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
@@ -241,34 +99,6 @@
   return nullptr;
 }
 
-void BM_Memmatch(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        absl::strings_internal::memmatch(kHaystack, kHaystackSize, "b", 1));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Memmatch);
-
-void BM_MemmatchMedium(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        absl::strings_internal::memmatch(kHaystack, kHaystackSize, "ab", 2));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmatchMedium);
-
-void BM_MemmatchPathological(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(absl::strings_internal::memmatch(
-        kHaystack, kHaystackSize, kHaystack + kHaystackSize / 2,
-        kHaystackSize - kHaystackSize / 2));
-  }
-  state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmatchPathological);
-
 void BM_Memcasematch(benchmark::State& state) {
   for (auto _ : state) {
     benchmark::DoNotOptimize(memcasematch(kHaystack, kHaystackSize, "b", 1));
@@ -295,29 +125,4 @@
 }
 BENCHMARK(BM_MemcasematchPathological);
 
-void BM_MemmemStartup(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(absl::strings_internal::memmem(
-        kHaystack + kHaystackSize - 10, 10, kHaystack + kHaystackSize - 1, 1));
-  }
-}
-BENCHMARK(BM_MemmemStartup);
-
-void BM_SearchStartup(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(
-        std::search(kHaystack + kHaystackSize - 10, kHaystack + kHaystackSize,
-                    kHaystack + kHaystackSize - 1, kHaystack + kHaystackSize));
-  }
-}
-BENCHMARK(BM_SearchStartup);
-
-void BM_MemmatchStartup(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(absl::strings_internal::memmatch(
-        kHaystack + kHaystackSize - 10, 10, kHaystack + kHaystackSize - 1, 1));
-  }
-}
-BENCHMARK(BM_MemmatchStartup);
-
 }  // namespace
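
The deleted commentary above described memmatch() as a memchr-plus-memcmp search that wins when candidate matches are rare. A stand-alone sketch of that idea (an illustration of the described approach, not the removed benchmark code or the library's memmatch):

// Sketch: find `needle` in `haystack` by jumping to candidate positions with
// std::memchr on the needle's first byte, then confirming with std::memcmp.
// Fast when the first byte is rare; degrades as candidate matches multiply.
#include <cstddef>
#include <cstring>

const char* MemMatchSketch(const char* haystack, size_t haylen,
                           const char* needle, size_t neelen) {
  if (neelen == 0) return haystack;     // empty needle matches at the start
  if (haylen < neelen) return nullptr;  // needle cannot fit
  const unsigned char first = static_cast<unsigned char>(needle[0]);
  const char* last_start = haystack + haylen - neelen;
  for (const char* p = haystack; p <= last_start; ++p) {
    p = static_cast<const char*>(
        std::memchr(p, first, static_cast<size_t>(last_start - p) + 1));
    if (p == nullptr) return nullptr;
    if (std::memcmp(p, needle, neelen) == 0) return p;
  }
  return nullptr;
}
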
diff --git a/abseil-cpp/absl/strings/internal/memutil_test.cc b/abseil-cpp/absl/strings/internal/memutil_test.cc
index d8681dd..277be2c 100644
--- a/abseil-cpp/absl/strings/internal/memutil_test.cc
+++ b/abseil-cpp/absl/strings/internal/memutil_test.cc
@@ -19,42 +19,12 @@
 #include <cstdlib>
 
 #include "gtest/gtest.h"
-#include "absl/strings/ascii.h"
 
 namespace {
 
-static char* memcasechr(const char* s, int c, size_t slen) {
-  c = absl::ascii_tolower(c);
-  for (; slen; ++s, --slen) {
-    if (absl::ascii_tolower(*s) == c) return const_cast<char*>(s);
-  }
-  return nullptr;
-}
-
-static const char* memcasematch(const char* phaystack, size_t haylen,
-                                const char* pneedle, size_t neelen) {
-  if (0 == neelen) {
-    return phaystack;  // even if haylen is 0
-  }
-  if (haylen < neelen) return nullptr;
-
-  const char* match;
-  const char* hayend = phaystack + haylen - neelen + 1;
-  while ((match = static_cast<char*>(
-              memcasechr(phaystack, pneedle[0], hayend - phaystack)))) {
-    if (absl::strings_internal::memcasecmp(match, pneedle, neelen) == 0)
-      return match;
-    else
-      phaystack = match + 1;
-  }
-  return nullptr;
-}
-
-TEST(MemUtilTest, AllTests) {
+TEST(MemUtil, memcasecmp) {
   // check memutil functions
-  char a[1000];
-  absl::strings_internal::memcat(a, 0, "hello", sizeof("hello") - 1);
-  absl::strings_internal::memcat(a, 5, " there", sizeof(" there") - 1);
+  const char a[] = "hello there";
 
   EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO there",
                                                sizeof("hello there") - 1),
@@ -66,114 +36,6 @@
                                                sizeof("hello there") - 2),
             0);
   EXPECT_EQ(absl::strings_internal::memcasecmp(a, "whatever", 0), 0);
-
-  char* p = absl::strings_internal::memdup("hello", 5);
-  free(p);
-
-  p = absl::strings_internal::memrchr("hello there", 'e',
-                                      sizeof("hello there") - 1);
-  EXPECT_TRUE(p && p[-1] == 'r');
-  p = absl::strings_internal::memrchr("hello there", 'e',
-                                      sizeof("hello there") - 2);
-  EXPECT_TRUE(p && p[-1] == 'h');
-  p = absl::strings_internal::memrchr("hello there", 'u',
-                                      sizeof("hello there") - 1);
-  EXPECT_TRUE(p == nullptr);
-
-  int len = absl::strings_internal::memspn("hello there",
-                                           sizeof("hello there") - 1, "hole");
-  EXPECT_EQ(len, sizeof("hello") - 1);
-  len = absl::strings_internal::memspn("hello there", sizeof("hello there") - 1,
-                                       "u");
-  EXPECT_EQ(len, 0);
-  len = absl::strings_internal::memspn("hello there", sizeof("hello there") - 1,
-                                       "");
-  EXPECT_EQ(len, 0);
-  len = absl::strings_internal::memspn("hello there", sizeof("hello there") - 1,
-                                       "trole h");
-  EXPECT_EQ(len, sizeof("hello there") - 1);
-  len = absl::strings_internal::memspn("hello there!",
-                                       sizeof("hello there!") - 1, "trole h");
-  EXPECT_EQ(len, sizeof("hello there") - 1);
-  len = absl::strings_internal::memspn("hello there!",
-                                       sizeof("hello there!") - 2, "trole h!");
-  EXPECT_EQ(len, sizeof("hello there!") - 2);
-
-  len = absl::strings_internal::memcspn("hello there",
-                                        sizeof("hello there") - 1, "leho");
-  EXPECT_EQ(len, 0);
-  len = absl::strings_internal::memcspn("hello there",
-                                        sizeof("hello there") - 1, "u");
-  EXPECT_EQ(len, sizeof("hello there") - 1);
-  len = absl::strings_internal::memcspn("hello there",
-                                        sizeof("hello there") - 1, "");
-  EXPECT_EQ(len, sizeof("hello there") - 1);
-  len = absl::strings_internal::memcspn("hello there",
-                                        sizeof("hello there") - 1, " ");
-  EXPECT_EQ(len, 5);
-
-  p = absl::strings_internal::mempbrk("hello there", sizeof("hello there") - 1,
-                                      "leho");
-  EXPECT_TRUE(p && p[1] == 'e' && p[2] == 'l');
-  p = absl::strings_internal::mempbrk("hello there", sizeof("hello there") - 1,
-                                      "nu");
-  EXPECT_TRUE(p == nullptr);
-  p = absl::strings_internal::mempbrk("hello there!",
-                                      sizeof("hello there!") - 2, "!");
-  EXPECT_TRUE(p == nullptr);
-  p = absl::strings_internal::mempbrk("hello there", sizeof("hello there") - 1,
-                                      " t ");
-  EXPECT_TRUE(p && p[-1] == 'o' && p[1] == 't');
-
-  {
-    const char kHaystack[] = "0123456789";
-    EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 0, "", 0), kHaystack);
-    EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "012", 3),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "0xx", 1),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "789", 3),
-              kHaystack + 7);
-    EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "9xx", 1),
-              kHaystack + 9);
-    EXPECT_TRUE(absl::strings_internal::memmem(kHaystack, 10, "9xx", 3) ==
-                nullptr);
-    EXPECT_TRUE(absl::strings_internal::memmem(kHaystack, 10, "xxx", 1) ==
-                nullptr);
-  }
-  {
-    const char kHaystack[] = "aBcDeFgHiJ";
-    EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 0, "", 0),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "Abc", 3),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "Axx", 1),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "hIj", 3),
-              kHaystack + 7);
-    EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "jxx", 1),
-              kHaystack + 9);
-    EXPECT_TRUE(absl::strings_internal::memcasemem(kHaystack, 10, "jxx", 3) ==
-                nullptr);
-    EXPECT_TRUE(absl::strings_internal::memcasemem(kHaystack, 10, "xxx", 1) ==
-                nullptr);
-  }
-  {
-    const char kHaystack[] = "0123456789";
-    EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 0, "", 0), kHaystack);
-    EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "012", 3),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "0xx", 1),
-              kHaystack);
-    EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "789", 3),
-              kHaystack + 7);
-    EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "9xx", 1),
-              kHaystack + 9);
-    EXPECT_TRUE(absl::strings_internal::memmatch(kHaystack, 10, "9xx", 3) ==
-                nullptr);
-    EXPECT_TRUE(absl::strings_internal::memmatch(kHaystack, 10, "xxx", 1) ==
-                nullptr);
-  }
 }
 
 }  // namespace
diff --git a/abseil-cpp/absl/strings/internal/ostringstream.cc b/abseil-cpp/absl/strings/internal/ostringstream.cc
index 05324c7..a0e5ec0 100644
--- a/abseil-cpp/absl/strings/internal/ostringstream.cc
+++ b/abseil-cpp/absl/strings/internal/ostringstream.cc
@@ -14,20 +14,27 @@
 
 #include "absl/strings/internal/ostringstream.h"
 
+#include <cassert>
+#include <cstddef>
+#include <ios>
+#include <streambuf>
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
 
-OStringStream::Buf::int_type OStringStream::overflow(int c) {
-  assert(s_);
-  if (!Buf::traits_type::eq_int_type(c, Buf::traits_type::eof()))
-    s_->push_back(static_cast<char>(c));
+OStringStream::Streambuf::int_type OStringStream::Streambuf::overflow(int c) {
+  assert(str_);
+  if (!std::streambuf::traits_type::eq_int_type(
+          c, std::streambuf::traits_type::eof()))
+    str_->push_back(static_cast<char>(c));
   return 1;
 }
 
-std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
-  assert(s_);
-  s_->append(s, n);
+std::streamsize OStringStream::Streambuf::xsputn(const char* s,
+                                                 std::streamsize n) {
+  assert(str_);
+  str_->append(s, static_cast<size_t>(n));
   return n;
 }
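
The new Streambuf simply forwards every character written to it into the target std::string. The general technique, independent of the class above, looks like the following sketch (illustrative only):

// Sketch: a std::streambuf that appends all output to a std::string by
// overriding overflow() for single characters and xsputn() for runs.
#include <cstddef>
#include <streambuf>
#include <string>

class StringAppendBuf : public std::streambuf {
 public:
  explicit StringAppendBuf(std::string* out) : out_(out) {}

 protected:
  int_type overflow(int_type ch) override {
    if (!traits_type::eq_int_type(ch, traits_type::eof()))
      out_->push_back(traits_type::to_char_type(ch));
    return 1;  // any value other than eof() reports success
  }
  std::streamsize xsputn(const char* s, std::streamsize n) override {
    out_->append(s, static_cast<size_t>(n));
    return n;
  }

 private:
  std::string* out_;
};
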
 
diff --git a/abseil-cpp/absl/strings/internal/ostringstream.h b/abseil-cpp/absl/strings/internal/ostringstream.h
index d25d604..c0e237d 100644
--- a/abseil-cpp/absl/strings/internal/ostringstream.h
+++ b/abseil-cpp/absl/strings/internal/ostringstream.h
@@ -16,11 +16,13 @@
 #define ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_
 
 #include <cassert>
+#include <ios>
 #include <ostream>
 #include <streambuf>
 #include <string>
+#include <utility>
 
-#include "absl/base/port.h"
+#include "absl/base/config.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -60,26 +62,49 @@
 //   strm << 3.14;
 //
 // Note: flush() has no effect. No reason to call it.
-class OStringStream : private std::basic_streambuf<char>, public std::ostream {
+class OStringStream final : public std::ostream {
  public:
   // The argument can be null, in which case you'll need to call str(p) with a
   // non-null argument before you can write to the stream.
   //
   // The destructor of OStringStream doesn't use the std::string. It's OK to
   // destroy the std::string before the stream.
-  explicit OStringStream(std::string* s) : std::ostream(this), s_(s) {}
+  explicit OStringStream(std::string* str)
+      : std::ostream(&buf_), buf_(str) {}
+  OStringStream(OStringStream&& that)
+      : std::ostream(std::move(static_cast<std::ostream&>(that))),
+        buf_(that.buf_) {
+    rdbuf(&buf_);
+  }
+  OStringStream& operator=(OStringStream&& that) {
+    std::ostream::operator=(std::move(static_cast<std::ostream&>(that)));
+    buf_ = that.buf_;
+    rdbuf(&buf_);
+    return *this;
+  }
 
-  std::string* str() { return s_; }
-  const std::string* str() const { return s_; }
-  void str(std::string* s) { s_ = s; }
+  std::string* str() { return buf_.str(); }
+  const std::string* str() const { return buf_.str(); }
+  void str(std::string* str) { buf_.str(str); }
 
  private:
-  using Buf = std::basic_streambuf<char>;
+  class Streambuf final : public std::streambuf {
+   public:
+    explicit Streambuf(std::string* str) : str_(str) {}
+    Streambuf(const Streambuf&) = default;
+    Streambuf& operator=(const Streambuf&) = default;
 
-  Buf::int_type overflow(int c) override;
-  std::streamsize xsputn(const char* s, std::streamsize n) override;
+    std::string* str() { return str_; }
+    const std::string* str() const { return str_; }
+    void str(std::string* str) { str_ = str; }
 
-  std::string* s_;
+   protected:
+    int_type overflow(int c) override;
+    std::streamsize xsputn(const char* s, std::streamsize n) override;
+
+   private:
+    std::string* str_;
+  } buf_;
 };
 
 }  // namespace strings_internal
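
Usage is unchanged by the refactor; what is new is that the stream is movable, with the move operations re-pointing rdbuf() at the embedded Streambuf so both the target string and the formatting state travel with the stream. A small usage sketch, mirroring the tests further below (OStringStream is an internal type; it is shown here only to illustrate the behavior):

// Sketch: output goes into the caller-owned std::string, and a moved-from
// stream hands over its target and its formatting state (std::hex here).
#include <ios>
#include <string>
#include <utility>

#include "absl/strings/internal/ostringstream.h"

void Demo() {
  std::string out = "value=";
  absl::strings_internal::OStringStream strm(&out);
  strm << std::hex << 255;  // out == "value=ff"
  absl::strings_internal::OStringStream moved(std::move(strm));
  moved << 16;              // still base 16: out == "value=ff10"
}
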
diff --git a/abseil-cpp/absl/strings/internal/ostringstream_test.cc b/abseil-cpp/absl/strings/internal/ostringstream_test.cc
index 2879e50..ef3ad57 100644
--- a/abseil-cpp/absl/strings/internal/ostringstream_test.cc
+++ b/abseil-cpp/absl/strings/internal/ostringstream_test.cc
@@ -14,10 +14,12 @@
 
 #include "absl/strings/internal/ostringstream.h"
 
+#include <ios>
 #include <memory>
 #include <ostream>
 #include <string>
 #include <type_traits>
+#include <utility>
 
 #include "gtest/gtest.h"
 
@@ -29,24 +31,51 @@
       "");
 }
 
-TEST(OStringStream, ConstructDestroy) {
+TEST(OStringStream, ConstructNullptr) {
+  absl::strings_internal::OStringStream strm(nullptr);
+  EXPECT_EQ(nullptr, strm.str());
+}
+
+TEST(OStringStream, ConstructStr) {
+  std::string s = "abc";
   {
-    absl::strings_internal::OStringStream strm(nullptr);
-    EXPECT_EQ(nullptr, strm.str());
+    absl::strings_internal::OStringStream strm(&s);
+    EXPECT_EQ(&s, strm.str());
   }
+  EXPECT_EQ("abc", s);
+}
+
+TEST(OStringStream, Destroy) {
+  std::unique_ptr<std::string> s(new std::string);
+  absl::strings_internal::OStringStream strm(s.get());
+  s.reset();
+}
+
+TEST(OStringStream, MoveConstruct) {
+  std::string s = "abc";
   {
-    std::string s = "abc";
-    {
-      absl::strings_internal::OStringStream strm(&s);
-      EXPECT_EQ(&s, strm.str());
-    }
-    EXPECT_EQ("abc", s);
+    absl::strings_internal::OStringStream strm1(&s);
+    strm1 << std::hex << 16;
+    EXPECT_EQ(&s, strm1.str());
+    absl::strings_internal::OStringStream strm2(std::move(strm1));
+    strm2 << 16;  // We should still be in base 16.
+    EXPECT_EQ(&s, strm2.str());
   }
+  EXPECT_EQ("abc1010", s);
+}
+
+TEST(OStringStream, MoveAssign) {
+  std::string s = "abc";
   {
-    std::unique_ptr<std::string> s(new std::string);
-    absl::strings_internal::OStringStream strm(s.get());
-    s.reset();
+    absl::strings_internal::OStringStream strm1(&s);
+    strm1 << std::hex << 16;
+    EXPECT_EQ(&s, strm1.str());
+    absl::strings_internal::OStringStream strm2(nullptr);
+    strm2 = std::move(strm1);
+    strm2 << 16;  // We should still be in base 16.
+    EXPECT_EQ(&s, strm2.str());
   }
+  EXPECT_EQ("abc1010", s);
 }
 
 TEST(OStringStream, Str) {
diff --git a/abseil-cpp/absl/strings/internal/resize_uninitialized.h b/abseil-cpp/absl/strings/internal/resize_uninitialized.h
index e42628e..49859dc 100644
--- a/abseil-cpp/absl/strings/internal/resize_uninitialized.h
+++ b/abseil-cpp/absl/strings/internal/resize_uninitialized.h
@@ -17,6 +17,7 @@
 #ifndef ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_
 #define ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_
 
+#include <algorithm>
 #include <string>
 #include <type_traits>
 #include <utility>
@@ -28,8 +29,9 @@
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
 
-// Is a subclass of true_type or false_type, depending on whether or not
-// T has a __resize_default_init member.
+// In this type trait, we look for a __resize_default_init member function, and
+// we use it if available; otherwise, we use resize. We provide HasMember to
+// indicate whether __resize_default_init is present.
 template <typename string_type, typename = void>
 struct ResizeUninitializedTraits {
   using HasMember = std::false_type;
@@ -66,6 +68,50 @@
   ResizeUninitializedTraits<string_type>::Resize(s, new_size);
 }
 
+// Used to ensure exponential growth so that the amortized complexity of
+// increasing the string size by a small amount is O(1), in contrast to
+// O(str->size()) in the case of precise growth.
+template <typename string_type>
+void STLStringReserveAmortized(string_type* s, size_t new_size) {
+  const size_t cap = s->capacity();
+  if (new_size > cap) {
+    // Make sure to always grow by at least a factor of 2x.
+    s->reserve((std::max)(new_size, 2 * cap));
+  }
+}
+
+// In this type trait, we look for an __append_default_init member function, and
+// we use it if available; otherwise, we use append.
+template <typename string_type, typename = void>
+struct AppendUninitializedTraits {
+  static void Append(string_type* s, size_t n) {
+    s->append(n, typename string_type::value_type());
+  }
+};
+
+template <typename string_type>
+struct AppendUninitializedTraits<
+    string_type, absl::void_t<decltype(std::declval<string_type&>()
+                                           .__append_default_init(237))> > {
+  static void Append(string_type* s, size_t n) {
+    s->__append_default_init(n);
+  }
+};
+
+// Like STLStringResizeUninitialized(str, new_size), except guaranteed to use
+// exponential growth so that the amortized complexity of increasing the string
+// size by a small amount is O(1), in contrast to O(str->size()) in the case of
+// precise growth.
+template <typename string_type>
+void STLStringResizeUninitializedAmortized(string_type* s, size_t new_size) {
+  const size_t size = s->size();
+  if (new_size > size) {
+    AppendUninitializedTraits<string_type>::Append(s, new_size - size);
+  } else {
+    s->erase(new_size);
+  }
+}
+
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
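
The amortized variant matters when a string is grown in many small steps: it extends the string through append-style geometric reallocation, so the total cost of N small extensions stays linear instead of quadratic. A usage sketch (these helpers are internal to Abseil and are shown purely to illustrate the contract documented above):

// Sketch: grow a buffer chunk by chunk. Each call may extend the size without
// value-initializing the new tail (when the string implementation allows it),
// and capacity grows geometrically, so reallocations are O(log(total)).
#include <algorithm>
#include <cstddef>
#include <string>

#include "absl/strings/internal/resize_uninitialized.h"

void AppendInChunks(std::string* buf, size_t total, size_t chunk) {
  if (chunk == 0) return;  // avoid an infinite loop on a degenerate chunk size
  size_t used = buf->size();
  while (used < total) {
    used = std::min(total, used + chunk);
    absl::strings_internal::STLStringResizeUninitializedAmortized(buf, used);
    // ... fill the newly exposed bytes of *buf up to index `used` ...
  }
}
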
diff --git a/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc b/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc
index 0f8b3c2..ad1b9c5 100644
--- a/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc
+++ b/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc
@@ -19,64 +19,115 @@
 namespace {
 
 int resize_call_count = 0;
+int append_call_count = 0;
 
 // A mock string class whose only purpose is to track how many times its
-// resize() method has been called.
+// resize()/append() methods have been called.
 struct resizable_string {
+  using value_type = char;
   size_t size() const { return 0; }
+  size_t capacity() const { return 0; }
   char& operator[](size_t) {
     static char c = '\0';
     return c;
   }
   void resize(size_t) { resize_call_count += 1; }
+  void append(size_t, value_type) { append_call_count += 1; }
+  void reserve(size_t) {}
+  resizable_string& erase(size_t = 0, size_t = 0) { return *this; }
 };
 
 int resize_default_init_call_count = 0;
+int append_default_init_call_count = 0;
 
 // A mock string class whose only purpose is to track how many times its
-// resize() and __resize_default_init() methods have been called.
-struct resize_default_init_string {
+// resize()/__resize_default_init()/append()/__append_default_init() methods
+// have been called.
+struct default_init_string {
   size_t size() const { return 0; }
+  size_t capacity() const { return 0; }
   char& operator[](size_t) {
     static char c = '\0';
     return c;
   }
   void resize(size_t) { resize_call_count += 1; }
   void __resize_default_init(size_t) { resize_default_init_call_count += 1; }
+  void __append_default_init(size_t) { append_default_init_call_count += 1; }
+  void reserve(size_t) {}
+  default_init_string& erase(size_t = 0, size_t = 0) { return *this; }
 };
 
 TEST(ResizeUninit, WithAndWithout) {
   resize_call_count = 0;
+  append_call_count = 0;
   resize_default_init_call_count = 0;
+  append_default_init_call_count = 0;
   {
     resizable_string rs;
 
     EXPECT_EQ(resize_call_count, 0);
+    EXPECT_EQ(append_call_count, 0);
     EXPECT_EQ(resize_default_init_call_count, 0);
+    EXPECT_EQ(append_default_init_call_count, 0);
     EXPECT_FALSE(
         absl::strings_internal::STLStringSupportsNontrashingResize(&rs));
     EXPECT_EQ(resize_call_count, 0);
+    EXPECT_EQ(append_call_count, 0);
     EXPECT_EQ(resize_default_init_call_count, 0);
+    EXPECT_EQ(append_default_init_call_count, 0);
     absl::strings_internal::STLStringResizeUninitialized(&rs, 237);
     EXPECT_EQ(resize_call_count, 1);
+    EXPECT_EQ(append_call_count, 0);
     EXPECT_EQ(resize_default_init_call_count, 0);
+    EXPECT_EQ(append_default_init_call_count, 0);
+    absl::strings_internal::STLStringResizeUninitializedAmortized(&rs, 1000);
+    EXPECT_EQ(resize_call_count, 1);
+    EXPECT_EQ(append_call_count, 1);
+    EXPECT_EQ(resize_default_init_call_count, 0);
+    EXPECT_EQ(append_default_init_call_count, 0);
   }
 
   resize_call_count = 0;
+  append_call_count = 0;
   resize_default_init_call_count = 0;
+  append_default_init_call_count = 0;
   {
-    resize_default_init_string rus;
+    default_init_string rus;
 
     EXPECT_EQ(resize_call_count, 0);
+    EXPECT_EQ(append_call_count, 0);
     EXPECT_EQ(resize_default_init_call_count, 0);
+    EXPECT_EQ(append_default_init_call_count, 0);
     EXPECT_TRUE(
         absl::strings_internal::STLStringSupportsNontrashingResize(&rus));
     EXPECT_EQ(resize_call_count, 0);
+    EXPECT_EQ(append_call_count, 0);
     EXPECT_EQ(resize_default_init_call_count, 0);
+    EXPECT_EQ(append_default_init_call_count, 0);
     absl::strings_internal::STLStringResizeUninitialized(&rus, 237);
     EXPECT_EQ(resize_call_count, 0);
+    EXPECT_EQ(append_call_count, 0);
     EXPECT_EQ(resize_default_init_call_count, 1);
+    EXPECT_EQ(append_default_init_call_count, 0);
+    absl::strings_internal::STLStringResizeUninitializedAmortized(&rus, 1000);
+    EXPECT_EQ(resize_call_count, 0);
+    EXPECT_EQ(append_call_count, 0);
+    EXPECT_EQ(resize_default_init_call_count, 1);
+    EXPECT_EQ(append_default_init_call_count, 1);
   }
 }
 
+TEST(ResizeUninit, Amortized) {
+  std::string str;
+  size_t prev_cap = str.capacity();
+  int cap_increase_count = 0;
+  for (int i = 0; i < 1000; ++i) {
+    absl::strings_internal::STLStringResizeUninitializedAmortized(&str, i);
+    size_t new_cap = str.capacity();
+    if (new_cap > prev_cap) ++cap_increase_count;
+    prev_cap = new_cap;
+  }
+  EXPECT_LT(cap_increase_count, 50);
+}
+
 }  // namespace
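
The bound of 50 in TEST(ResizeUninit, Amortized) is deliberately loose: with roughly geometric (2x) capacity growth, reaching a size of 1000 takes on the order of ten capacity increases, plus a few more for small-size heuristics. A quick back-of-the-envelope check (the starting capacity of 15 is an assumption matching a common small-string buffer, not a measured value):

// Sketch: count the doublings needed to reach `target` from a small starting
// capacity. For start = 15 and target = 1000 this prints 7 -- far below 50.
#include <cstddef>
#include <iostream>

int main() {
  size_t cap = 15;  // assumed initial capacity
  const size_t target = 1000;
  int doublings = 0;
  while (cap < target) {
    cap *= 2;
    ++doublings;
  }
  std::cout << doublings << "\n";
  return 0;
}
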
diff --git a/abseil-cpp/absl/strings/internal/stl_type_traits.h b/abseil-cpp/absl/strings/internal/stl_type_traits.h
index 6035ca4..e50468b 100644
--- a/abseil-cpp/absl/strings/internal/stl_type_traits.h
+++ b/abseil-cpp/absl/strings/internal/stl_type_traits.h
@@ -13,7 +13,7 @@
 // limitations under the License.
 //
 
-// Thie file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
+// This file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
 // trait metafunction to assist in working with the _GLIBCXX_DEBUG debug
 // wrappers of STL containers.
 //
diff --git a/abseil-cpp/absl/strings/internal/str_format/arg.cc b/abseil-cpp/absl/strings/internal/str_format/arg.cc
index 9feb224..c0a9a28 100644
--- a/abseil-cpp/absl/strings/internal/str_format/arg.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/arg.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 //
 // POSIX spec:
 //   http://pubs.opengroup.org/onlinepubs/009695399/functions/fprintf.html
@@ -63,7 +77,7 @@
       v >>= 3;
     } while (v);
     start_ = p;
-    size_ = storage_ + sizeof(storage_) - p;
+    size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
   }
 
   // Print the signed or unsigned integer as decimal.
@@ -72,7 +86,8 @@
   void PrintAsDec(T v) {
     static_assert(std::is_integral<T>::value, "");
     start_ = storage_;
-    size_ = numbers_internal::FastIntToBuffer(v, storage_) - storage_;
+    size_ = static_cast<size_t>(numbers_internal::FastIntToBuffer(v, storage_) -
+                                storage_);
   }
 
   void PrintAsDec(int128 v) {
@@ -91,7 +106,7 @@
     char *p = storage_ + sizeof(storage_);
     do {
       p -= 2;
-      numbers_internal::PutTwoDigits(static_cast<size_t>(v % 100), p);
+      numbers_internal::PutTwoDigits(static_cast<uint32_t>(v % 100), p);
       v /= 100;
     } while (v);
     if (p[0] == '0') {
@@ -101,7 +116,7 @@
     if (add_neg) {
       *--p = '-';
     }
-    size_ = storage_ + sizeof(storage_) - p;
+    size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
     start_ = p;
   }
 
@@ -124,7 +139,7 @@
       ++p;
     }
     start_ = p;
-    size_ = storage_ + sizeof(storage_) - p;
+    size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
   }
 
   // Print the unsigned integer as hex using uppercase.
@@ -140,7 +155,7 @@
       v >>= 4;
     } while (v);
     start_ = p;
-    size_ = storage_ + sizeof(storage_) - p;
+    size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
   }
 
   // The printed value including the '-' sign if available.
@@ -194,10 +209,12 @@
   return {};
 }
 
-bool ConvertCharImpl(unsigned char v, const FormatConversionSpecImpl conv,
-                     FormatSinkImpl *sink) {
+bool ConvertCharImpl(char v,
+                     const FormatConversionSpecImpl conv,
+                     FormatSinkImpl* sink) {
   size_t fill = 0;
-  if (conv.width() >= 0) fill = conv.width();
+  if (conv.width() >= 0)
+    fill = static_cast<size_t>(conv.width());
   ReducePadding(1, &fill);
   if (!conv.has_left_flag()) sink->Append(fill, ' ');
   sink->Append(1, v);
@@ -211,7 +228,8 @@
   // Print as a sequence of Substrings:
   //   [left_spaces][sign][base_indicator][zeroes][formatted][right_spaces]
   size_t fill = 0;
-  if (conv.width() >= 0) fill = conv.width();
+  if (conv.width() >= 0)
+    fill = static_cast<size_t>(conv.width());
 
   string_view formatted = as_digits.without_neg_or_zero();
   ReducePadding(formatted, &fill);
@@ -222,10 +240,9 @@
   string_view base_indicator = BaseIndicator(as_digits, conv);
   ReducePadding(base_indicator, &fill);
 
-  int precision = conv.precision();
-  bool precision_specified = precision >= 0;
-  if (!precision_specified)
-    precision = 1;
+  bool precision_specified = conv.precision() >= 0;
+  size_t precision =
+      precision_specified ? static_cast<size_t>(conv.precision()) : size_t{1};
 
   if (conv.has_alt_flag() &&
       conv.conversion_char() == FormatConversionCharInternal::o) {
@@ -233,7 +250,7 @@
     //   "For o conversion, it increases the precision (if necessary) to
     //   force the first digit of the result to be zero."
     if (formatted.empty() || *formatted.begin() != '0') {
-      int needed = static_cast<int>(formatted.size()) + 1;
+      size_t needed = formatted.size() + 1;
       precision = std::max(precision, needed);
     }
   }
@@ -262,8 +279,38 @@
 }
 
 template <typename T>
-bool ConvertIntArg(T v, const FormatConversionSpecImpl conv,
-                   FormatSinkImpl *sink) {
+bool ConvertFloatArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
+  if (conv.conversion_char() == FormatConversionCharInternal::v) {
+    conv.set_conversion_char(FormatConversionCharInternal::g);
+  }
+
+  return FormatConversionCharIsFloat(conv.conversion_char()) &&
+         ConvertFloatImpl(v, conv, sink);
+}
+
+inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv,
+                             FormatSinkImpl *sink) {
+  if (conv.is_basic()) {
+    sink->Append(v);
+    return true;
+  }
+  return sink->PutPaddedString(v, conv.width(), conv.precision(),
+                               conv.has_left_flag());
+}
+
+}  // namespace
+
+bool ConvertBoolArg(bool v, FormatSinkImpl *sink) {
+  if (v) {
+    sink->Append("true");
+  } else {
+    sink->Append("false");
+  }
+  return true;
+}
+
+template <typename T>
+bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
   using U = typename MakeUnsigned<T>::type;
   IntDigits as_digits;
 
@@ -273,7 +320,7 @@
   // FormatConversionChar is declared, but not defined.
   switch (static_cast<uint8_t>(conv.conversion_char())) {
     case static_cast<uint8_t>(FormatConversionCharInternal::c):
-      return ConvertCharImpl(static_cast<unsigned char>(v), conv, sink);
+      return ConvertCharImpl(static_cast<char>(v), conv, sink);
 
     case static_cast<uint8_t>(FormatConversionCharInternal::o):
       as_digits.PrintAsOct(static_cast<U>(v));
@@ -292,6 +339,7 @@
 
     case static_cast<uint8_t>(FormatConversionCharInternal::d):
     case static_cast<uint8_t>(FormatConversionCharInternal::i):
+    case static_cast<uint8_t>(FormatConversionCharInternal::v):
       as_digits.PrintAsDec(v);
       break;
 
@@ -306,7 +354,7 @@
       return ConvertFloatImpl(static_cast<double>(v), conv, sink);
 
     default:
-       ABSL_INTERNAL_ASSUME(false);
+      ABSL_ASSUME(false);
   }
 
   if (conv.is_basic()) {
@@ -316,24 +364,37 @@
   return ConvertIntImplInnerSlow(as_digits, conv, sink);
 }
 
-template <typename T>
-bool ConvertFloatArg(T v, const FormatConversionSpecImpl conv,
-                     FormatSinkImpl *sink) {
-  return FormatConversionCharIsFloat(conv.conversion_char()) &&
-         ConvertFloatImpl(v, conv, sink);
-}
-
-inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv,
-                             FormatSinkImpl *sink) {
-  if (conv.is_basic()) {
-    sink->Append(v);
-    return true;
-  }
-  return sink->PutPaddedString(v, conv.width(), conv.precision(),
-                               conv.has_left_flag());
-}
-
-}  // namespace
+template bool ConvertIntArg<char>(char v, FormatConversionSpecImpl conv,
+                                  FormatSinkImpl *sink);
+template bool ConvertIntArg<signed char>(signed char v,
+                                         FormatConversionSpecImpl conv,
+                                         FormatSinkImpl *sink);
+template bool ConvertIntArg<unsigned char>(unsigned char v,
+                                           FormatConversionSpecImpl conv,
+                                           FormatSinkImpl *sink);
+template bool ConvertIntArg<short>(short v,  // NOLINT
+                                   FormatConversionSpecImpl conv,
+                                   FormatSinkImpl *sink);
+template bool ConvertIntArg<unsigned short>(unsigned short v,  // NOLINT
+                                            FormatConversionSpecImpl conv,
+                                            FormatSinkImpl *sink);
+template bool ConvertIntArg<int>(int v, FormatConversionSpecImpl conv,
+                                 FormatSinkImpl *sink);
+template bool ConvertIntArg<unsigned int>(unsigned int v,
+                                          FormatConversionSpecImpl conv,
+                                          FormatSinkImpl *sink);
+template bool ConvertIntArg<long>(long v,  // NOLINT
+                                  FormatConversionSpecImpl conv,
+                                  FormatSinkImpl *sink);
+template bool ConvertIntArg<unsigned long>(unsigned long v,  // NOLINT
+                                           FormatConversionSpecImpl conv,
+                                           FormatSinkImpl *sink);
+template bool ConvertIntArg<long long>(long long v,  // NOLINT
+                                       FormatConversionSpecImpl conv,
+                                       FormatSinkImpl *sink);
+template bool ConvertIntArg<unsigned long long>(unsigned long long v,  // NOLINT
+                                                FormatConversionSpecImpl conv,
+                                                FormatSinkImpl *sink);
 
 // ==================== Strings ====================
 StringConvertResult FormatConvertImpl(const std::string &v,
@@ -361,7 +422,7 @@
     len = std::strlen(v);
   } else {
     // If precision is set, we look for the NUL-terminator on the valid range.
-    len = std::find(v, v + conv.precision(), '\0') - v;
+    len = static_cast<size_t>(std::find(v, v + conv.precision(), '\0') - v);
   }
   return {ConvertStringArg(string_view(v, len), conv, sink)};
 }
@@ -396,11 +457,12 @@
 }
 
 // ==================== Chars ====================
-IntegralConvertResult FormatConvertImpl(char v,
-                                        const FormatConversionSpecImpl conv,
-                                        FormatSinkImpl *sink) {
+CharConvertResult FormatConvertImpl(char v, const FormatConversionSpecImpl conv,
+                                    FormatSinkImpl *sink) {
   return {ConvertIntArg(v, conv, sink)};
 }
+
+// ==================== Ints ====================
 IntegralConvertResult FormatConvertImpl(signed char v,
                                         const FormatConversionSpecImpl conv,
                                         FormatSinkImpl *sink) {
@@ -411,8 +473,6 @@
                                         FormatSinkImpl *sink) {
   return {ConvertIntArg(v, conv, sink)};
 }
-
-// ==================== Ints ====================
 IntegralConvertResult FormatConvertImpl(short v,  // NOLINT
                                         const FormatConversionSpecImpl conv,
                                         FormatSinkImpl *sink) {
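
Most of the casts added in this file follow one pattern: digits are written into a fixed scratch buffer from the back, and the resulting pointer difference (a signed ptrdiff_t) is explicitly cast to size_t before being stored. A stand-alone sketch of that back-to-front emission (illustrative only, not the IntDigits class itself):

// Sketch: format an unsigned value as lowercase hex by writing digits from the
// end of a scratch buffer toward the front, then measuring the used span with
// an explicit size_t cast of the pointer difference.
#include <cstddef>
#include <cstdint>

#include "absl/strings/string_view.h"

absl::string_view PrintHexSketch(uint64_t v, char (&storage)[32]) {
  char* p = storage + sizeof(storage);  // one past the last digit slot
  do {
    *--p = "0123456789abcdef"[v & 15];
    v >>= 4;
  } while (v != 0);
  const size_t size = static_cast<size_t>(storage + sizeof(storage) - p);
  return absl::string_view(p, size);  // e.g. PrintHexSketch(255, buf) == "ff"
}
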
diff --git a/abseil-cpp/absl/strings/internal/str_format/arg.h b/abseil-cpp/absl/strings/internal/str_format/arg.h
index 3dbc152..3ce30fe 100644
--- a/abseil-cpp/absl/strings/internal/str_format/arg.h
+++ b/abseil-cpp/absl/strings/internal/str_format/arg.h
@@ -1,9 +1,24 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_
 #define ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_
 
 #include <string.h>
 #include <wchar.h>
 
+#include <algorithm>
 #include <cstdio>
 #include <iomanip>
 #include <limits>
@@ -11,10 +26,12 @@
 #include <sstream>
 #include <string>
 #include <type_traits>
+#include <utility>
 
 #include "absl/base/port.h"
 #include "absl/meta/type_traits.h"
 #include "absl/numeric/int128.h"
+#include "absl/strings/internal/has_absl_stringify.h"
 #include "absl/strings/internal/str_format/extension.h"
 #include "absl/strings/string_view.h"
 
@@ -31,6 +48,24 @@
 
 namespace str_format_internal {
 
+template <FormatConversionCharSet C>
+struct ArgConvertResult {
+  bool value;
+};
+
+using IntegralConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
+    FormatConversionCharSetInternal::c,
+    FormatConversionCharSetInternal::kNumeric,
+    FormatConversionCharSetInternal::kStar,
+    FormatConversionCharSetInternal::v)>;
+using FloatingConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
+    FormatConversionCharSetInternal::kFloating,
+    FormatConversionCharSetInternal::v)>;
+using CharConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
+    FormatConversionCharSetInternal::c,
+    FormatConversionCharSetInternal::kNumeric,
+    FormatConversionCharSetInternal::kStar)>;
+
 template <typename T, typename = void>
 struct HasUserDefinedConvert : std::false_type {};
 
@@ -41,7 +76,50 @@
                                     std::declval<FormatSink*>()))>>
     : std::true_type {};
 
-void AbslFormatConvert();  // Stops the lexical name lookup
+// These declarations prevent ADL lookup from continuing in absl namespaces;
+// we are deliberately using these as ADL hooks and want them to consider
+// non-absl namespaces only.
+void AbslFormatConvert();
+void AbslStringify();
+
+template <typename T>
+bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink);
+
+// Forward declarations of internal `ConvertIntArg` function template
+// instantiations are here to avoid including the template body in the headers
+// and instantiating it in large numbers of translation units. Explicit
+// instantiations can be found in "absl/strings/internal/str_format/arg.cc"
+extern template bool ConvertIntArg<char>(char v, FormatConversionSpecImpl conv,
+                                         FormatSinkImpl* sink);
+extern template bool ConvertIntArg<signed char>(signed char v,
+                                                FormatConversionSpecImpl conv,
+                                                FormatSinkImpl* sink);
+extern template bool ConvertIntArg<unsigned char>(unsigned char v,
+                                                  FormatConversionSpecImpl conv,
+                                                  FormatSinkImpl* sink);
+extern template bool ConvertIntArg<short>(short v,  // NOLINT
+                                          FormatConversionSpecImpl conv,
+                                          FormatSinkImpl* sink);
+extern template bool ConvertIntArg<unsigned short>(   // NOLINT
+    unsigned short v, FormatConversionSpecImpl conv,  // NOLINT
+    FormatSinkImpl* sink);
+extern template bool ConvertIntArg<int>(int v, FormatConversionSpecImpl conv,
+                                        FormatSinkImpl* sink);
+extern template bool ConvertIntArg<unsigned int>(unsigned int v,
+                                                 FormatConversionSpecImpl conv,
+                                                 FormatSinkImpl* sink);
+extern template bool ConvertIntArg<long>(                           // NOLINT
+    long v, FormatConversionSpecImpl conv, FormatSinkImpl* sink);   // NOLINT
+extern template bool ConvertIntArg<unsigned long>(unsigned long v,  // NOLINT
+                                                  FormatConversionSpecImpl conv,
+                                                  FormatSinkImpl* sink);
+extern template bool ConvertIntArg<long long>(long long v,  // NOLINT
+                                              FormatConversionSpecImpl conv,
+                                              FormatSinkImpl* sink);
+extern template bool ConvertIntArg<unsigned long long>(   // NOLINT
+    unsigned long long v, FormatConversionSpecImpl conv,  // NOLINT
+    FormatSinkImpl* sink);
+
 template <typename T>
 auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv,
                        FormatSinkImpl* sink)
@@ -58,6 +136,39 @@
 }
 
 template <typename T>
+auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv,
+                       FormatSinkImpl* sink)
+    -> std::enable_if_t<std::is_enum<T>::value &&
+                            std::is_void<decltype(AbslStringify(
+                                std::declval<FormatSink&>(), v))>::value,
+                        IntegralConvertResult> {
+  if (conv.conversion_char() == FormatConversionCharInternal::v) {
+    using FormatSinkT =
+        absl::enable_if_t<sizeof(const T& (*)()) != 0, FormatSink>;
+    auto fs = sink->Wrap<FormatSinkT>();
+    AbslStringify(fs, v);
+    return {true};
+  } else {
+    return {ConvertIntArg(
+        static_cast<typename std::underlying_type<T>::type>(v), conv, sink)};
+  }
+}
+
+template <typename T>
+auto FormatConvertImpl(const T& v, FormatConversionSpecImpl,
+                       FormatSinkImpl* sink)
+    -> std::enable_if_t<!std::is_enum<T>::value &&
+                            std::is_void<decltype(AbslStringify(
+                                std::declval<FormatSink&>(), v))>::value,
+                        ArgConvertResult<FormatConversionCharSetInternal::v>> {
+  using FormatSinkT =
+      absl::enable_if_t<sizeof(const T& (*)()) != 0, FormatSink>;
+  auto fs = sink->Wrap<FormatSinkT>();
+  AbslStringify(fs, v);
+  return {true};
+}
+
+template <typename T>
 class StreamedWrapper;
 
 // If 'v' can be converted (in the printf sense) according to 'conv',
@@ -82,11 +193,6 @@
 };
 
 template <FormatConversionCharSet C>
-struct ArgConvertResult {
-  bool value;
-};
-
-template <FormatConversionCharSet C>
 constexpr FormatConversionCharSet ExtractCharSet(FormatConvertResult<C>) {
   return C;
 }
@@ -96,8 +202,8 @@
   return C;
 }
 
-using StringConvertResult =
-    ArgConvertResult<FormatConversionCharSetInternal::s>;
+using StringConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
+    FormatConversionCharSetInternal::s, FormatConversionCharSetInternal::v)>;
 ArgConvertResult<FormatConversionCharSetInternal::p> FormatConvertImpl(
     VoidPtr v, FormatConversionSpecImpl conv, FormatSinkImpl* sink);
 
@@ -108,6 +214,14 @@
 StringConvertResult FormatConvertImpl(string_view v,
                                       FormatConversionSpecImpl conv,
                                       FormatSinkImpl* sink);
+#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW)
+inline StringConvertResult FormatConvertImpl(std::string_view v,
+                                             FormatConversionSpecImpl conv,
+                                             FormatSinkImpl* sink) {
+  return FormatConvertImpl(absl::string_view(v.data(), v.size()), conv, sink);
+}
+#endif  // ABSL_HAVE_STD_STRING_VIEW && !ABSL_USES_STD_STRING_VIEW
+
 ArgConvertResult<FormatConversionCharSetUnion(
     FormatConversionCharSetInternal::s, FormatConversionCharSetInternal::p)>
 FormatConvertImpl(const char* v, const FormatConversionSpecImpl conv,
@@ -122,7 +236,7 @@
   size_t space_remaining = 0;
 
   int width = conv.width();
-  if (width >= 0) space_remaining = width;
+  if (width >= 0) space_remaining = static_cast<size_t>(width);
 
   size_t to_write = value.size();
 
@@ -151,12 +265,7 @@
   return {true};
 }
 
-using IntegralConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
-    FormatConversionCharSetInternal::c,
-    FormatConversionCharSetInternal::kNumeric,
-    FormatConversionCharSetInternal::kStar)>;
-using FloatingConvertResult =
-    ArgConvertResult<FormatConversionCharSetInternal::kFloating>;
+bool ConvertBoolArg(bool v, FormatSinkImpl* sink);
 
 // Floats.
 FloatingConvertResult FormatConvertImpl(float v, FormatConversionSpecImpl conv,
@@ -168,16 +277,16 @@
                                         FormatSinkImpl* sink);
 
 // Chars.
-IntegralConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv,
-                                        FormatSinkImpl* sink);
+CharConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv,
+                                    FormatSinkImpl* sink);
+
+// Ints.
 IntegralConvertResult FormatConvertImpl(signed char v,
                                         FormatConversionSpecImpl conv,
                                         FormatSinkImpl* sink);
 IntegralConvertResult FormatConvertImpl(unsigned char v,
                                         FormatConversionSpecImpl conv,
                                         FormatSinkImpl* sink);
-
-// Ints.
 IntegralConvertResult FormatConvertImpl(short v,  // NOLINT
                                         FormatConversionSpecImpl conv,
                                         FormatSinkImpl* sink);
@@ -206,9 +315,16 @@
 IntegralConvertResult FormatConvertImpl(uint128 v,
                                         FormatConversionSpecImpl conv,
                                         FormatSinkImpl* sink);
+
+// This function needs to be a template due to ambiguity regarding type
+// conversions.
 template <typename T, enable_if_t<std::is_same<T, bool>::value, int> = 0>
 IntegralConvertResult FormatConvertImpl(T v, FormatConversionSpecImpl conv,
                                         FormatSinkImpl* sink) {
+  if (conv.conversion_char() == FormatConversionCharInternal::v) {
+    return {ConvertBoolArg(v, sink)};
+  }
+
   return FormatConvertImpl(static_cast<int>(v), conv, sink);
 }
 
@@ -216,7 +332,8 @@
 // FormatArgImpl will use the underlying Convert functions instead.
 template <typename T>
 typename std::enable_if<std::is_enum<T>::value &&
-                            !HasUserDefinedConvert<T>::value,
+                            !HasUserDefinedConvert<T>::value &&
+                            !strings_internal::HasAbslStringify<T>::value,
                         IntegralConvertResult>::type
 FormatConvertImpl(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink);
 
@@ -279,11 +396,11 @@
 
 template <typename Arg>
 constexpr FormatConversionCharSet ArgumentToConv() {
-  return absl::str_format_internal::ExtractCharSet(
-      decltype(str_format_internal::FormatConvertImpl(
-          std::declval<const Arg&>(),
-          std::declval<const FormatConversionSpecImpl&>(),
-          std::declval<FormatSinkImpl*>())){});
+  using ConvResult = decltype(str_format_internal::FormatConvertImpl(
+      std::declval<const Arg&>(),
+      std::declval<const FormatConversionSpecImpl&>(),
+      std::declval<FormatSinkImpl*>()));
+  return absl::str_format_internal::ExtractCharSet(ConvResult{});
 }
 
 // A type-erased handle to a format argument.
@@ -324,12 +441,13 @@
   // For everything else:
   //   - Decay char* and char arrays into `const char*`
   //   - Decay any other pointer to `const void*`
-  //   - Decay all enums to their underlying type.
+  //   - Decay all enums to the integral promotion of their underlying type.
   //   - Decay function pointers to void*.
   template <typename T, typename = void>
   struct DecayType {
     static constexpr bool kHasUserDefined =
-        str_format_internal::HasUserDefinedConvert<T>::value;
+        str_format_internal::HasUserDefinedConvert<T>::value ||
+        strings_internal::HasAbslStringify<T>::value;
     using type = typename std::conditional<
         !kHasUserDefined && std::is_convertible<T, const char*>::value,
         const char*,
@@ -341,8 +459,9 @@
   struct DecayType<T,
                    typename std::enable_if<
                        !str_format_internal::HasUserDefinedConvert<T>::value &&
+                       !strings_internal::HasAbslStringify<T>::value &&
                        std::is_enum<T>::value>::type> {
-    using type = typename std::underlying_type<T>::type;
+    using type = decltype(+typename std::underlying_type<T>::type());
   };
 
  public:
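
The overloads above are the plumbing behind the AbslStringify extension point: a type that provides an ADL-visible AbslStringify(Sink&, const T&) is formatted through that hook, %v dispatches to it, and enums without it decay to the integral promotion of their underlying type. A user-side sketch of what this enables, assuming the public absl::StrFormat/%v surface that this internal machinery feeds:

// Sketch: a user-defined type opting into formatting via AbslStringify, which
// the machinery declared above lets absl::StrFormat print with %v.
#include <string>

#include "absl/strings/str_format.h"

struct Point {
  int x = 0;
  int y = 0;

  // ADL-found extension point; Sink is any type usable with absl::Format.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    absl::Format(&sink, "(%d, %d)", p.x, p.y);
  }
};

std::string Demo() {
  return absl::StrFormat("point = %v", Point{2, 3});  // "point = (2, 3)"
}
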
diff --git a/abseil-cpp/absl/strings/internal/str_format/arg_test.cc b/abseil-cpp/absl/strings/internal/str_format/arg_test.cc
index f53fd6b..1261937 100644
--- a/abseil-cpp/absl/strings/internal/str_format/arg_test.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/arg_test.cc
@@ -6,6 +6,12 @@
 //
 //      https://www.apache.org/licenses/LICENSE-2.0
 //
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/internal/str_format/arg.h"
 
 #include <ostream>
diff --git a/abseil-cpp/absl/strings/internal/str_format/bind.cc b/abseil-cpp/absl/strings/internal/str_format/bind.cc
index 6980ed1..77a4222 100644
--- a/abseil-cpp/absl/strings/internal/str_format/bind.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/bind.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/internal/str_format/bind.h"
 
 #include <cerrno>
@@ -18,7 +32,8 @@
     return false;
   }
   // -1 because positions are 1-based
-  return FormatArgImplFriend::ToInt(pack[position - 1], value);
+  return FormatArgImplFriend::ToInt(pack[static_cast<size_t>(position) - 1],
+                                    value);
 }
 
 class ArgContext {
@@ -42,9 +57,9 @@
   const FormatArgImpl* arg = nullptr;
   int arg_position = unbound->arg_position;
   if (static_cast<size_t>(arg_position - 1) >= pack_.size()) return false;
-  arg = &pack_[arg_position - 1];  // 1-based
+  arg = &pack_[static_cast<size_t>(arg_position - 1)];  // 1-based
 
-  if (!unbound->flags.basic) {
+  if (unbound->flags != Flags::kBasic) {
     int width = unbound->width.value();
     bool force_left = false;
     if (unbound->width.is_from_arg()) {
@@ -70,9 +85,8 @@
     FormatConversionSpecImplFriend::SetPrecision(precision, bound);
 
     if (force_left) {
-      Flags flags = unbound->flags;
-      flags.left = true;
-      FormatConversionSpecImplFriend::SetFlags(flags, bound);
+      FormatConversionSpecImplFriend::SetFlags(unbound->flags | Flags::kLeft,
+                                               bound);
     } else {
       FormatConversionSpecImplFriend::SetFlags(unbound->flags, bound);
     }
@@ -221,7 +235,7 @@
     errno = sink.error();
     return -1;
   }
-  if (sink.count() > std::numeric_limits<int>::max()) {
+  if (sink.count() > static_cast<size_t>(std::numeric_limits<int>::max())) {
     errno = EFBIG;
     return -1;
   }
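The casts added in this file make the signed/unsigned comparisons explicit: sink.count() is a size_t while std::numeric_limits<int>::max() is an int, so the limit is converted to size_t before comparing. A small stand-alone illustration of the same guard (FitsInInt is a made-up name used only for this sketch):

#include <cstddef>
#include <limits>

// Returns true when a byte count can be reported through an int-returning
// printf-style API without overflow (mirrors the EFBIG check above).
bool FitsInInt(std::size_t count) {
  return count <= static_cast<std::size_t>(std::numeric_limits<int>::max());
}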
diff --git a/abseil-cpp/absl/strings/internal/str_format/bind.h b/abseil-cpp/absl/strings/internal/str_format/bind.h
index 585246e..5e2a43d 100644
--- a/abseil-cpp/absl/strings/internal/str_format/bind.h
+++ b/abseil-cpp/absl/strings/internal/str_format/bind.h
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_
 #define ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_
 
@@ -7,10 +21,12 @@
 #include <string>
 
 #include "absl/base/port.h"
+#include "absl/container/inlined_vector.h"
 #include "absl/strings/internal/str_format/arg.h"
 #include "absl/strings/internal/str_format/checker.h"
 #include "absl/strings/internal/str_format/parser.h"
 #include "absl/types/span.h"
+#include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -73,6 +89,36 @@
     : public MakeDependent<UntypedFormatSpec, Args...>::type {
   using Base = typename MakeDependent<UntypedFormatSpec, Args...>::type;
 
+  template <bool res>
+  struct ErrorMaker {
+    constexpr bool operator()(int) const { return res; }
+  };
+
+  template <int i, int j>
+  static constexpr bool CheckArity(ErrorMaker<true> SpecifierCount = {},
+                                   ErrorMaker<i == j> ParametersPassed = {}) {
+    static_assert(SpecifierCount(i) == ParametersPassed(j),
+                  "Number of arguments passed must match the number of "
+                  "conversion specifiers.");
+    return true;
+  }
+
+  template <FormatConversionCharSet specified, FormatConversionCharSet passed,
+            int arg>
+  static constexpr bool CheckMatch(
+      ErrorMaker<Contains(specified, passed)> MismatchedArgumentNumber = {}) {
+    static_assert(MismatchedArgumentNumber(arg),
+                  "Passed argument must match specified format.");
+    return true;
+  }
+
+  template <FormatConversionCharSet... C, size_t... I>
+  static bool CheckMatches(absl::index_sequence<I...>) {
+    bool res[] = {true, CheckMatch<Args, C, I + 1>()...};
+    (void)res;
+    return true;
+  }
+
  public:
 #ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
 
@@ -86,7 +132,7 @@
   // We use the 'unavailable' attribute to give a better compiler error than
   // just 'method is deleted'.
   // To avoid checking the format twice, we just check that the format is
-  // constexpr. If is it valid, then the overload below will kick in.
+  // constexpr. If it is valid, then the overload below will kick in.
   // We add the template here to make this overload have lower priority.
   template <typename = void>
   FormatSpecTemplate(const char* s)  // NOLINT
@@ -98,7 +144,8 @@
   template <typename T = void>
   FormatSpecTemplate(string_view s)  // NOLINT
       __attribute__((enable_if(str_format_internal::EnsureConstexpr(s),
-                               "constexpr trap"))) {
+                               "constexpr trap")))
+      : Base("to avoid noise in the compiler error") {
     static_assert(sizeof(T*) == 0,
                   "Format specified does not match the arguments passed.");
   }
@@ -119,29 +166,19 @@
 
 #endif  // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
 
-  template <FormatConversionCharSet... C,
-            typename = typename std::enable_if<
-                AllOf(sizeof...(C) == sizeof...(Args), Contains(Args,
-                                                                C)...)>::type>
+  template <FormatConversionCharSet... C>
   FormatSpecTemplate(const ExtendedParsedFormat<C...>& pc)  // NOLINT
-      : Base(&pc) {}
+      : Base(&pc) {
+    CheckArity<sizeof...(C), sizeof...(Args)>();
+    CheckMatches<C...>(absl::make_index_sequence<sizeof...(C)>{});
+  }
 };
 
 class Streamable {
  public:
   Streamable(const UntypedFormatSpecImpl& format,
              absl::Span<const FormatArgImpl> args)
-      : format_(format) {
-    if (args.size() <= ABSL_ARRAYSIZE(few_args_)) {
-      for (size_t i = 0; i < args.size(); ++i) {
-        few_args_[i] = args[i];
-      }
-      args_ = absl::MakeSpan(few_args_, args.size());
-    } else {
-      many_args_.assign(args.begin(), args.end());
-      args_ = many_args_;
-    }
-  }
+      : format_(format), args_(args.begin(), args.end()) {}
 
   std::ostream& Print(std::ostream& os) const;
 
@@ -151,12 +188,7 @@
 
  private:
   const UntypedFormatSpecImpl& format_;
-  absl::Span<const FormatArgImpl> args_;
-  // if args_.size() is 4 or less:
-  FormatArgImpl few_args_[4] = {FormatArgImpl(0), FormatArgImpl(0),
-                                FormatArgImpl(0), FormatArgImpl(0)};
-  // if args_.size() is more than 4:
-  std::vector<FormatArgImpl> many_args_;
+  absl::InlinedVector<FormatArgImpl, 4> args_;
 };
 
 // for testing
@@ -165,8 +197,7 @@
 bool BindWithPack(const UnboundConversion* props,
                   absl::Span<const FormatArgImpl> pack, BoundConversion* bound);
 
-bool FormatUntyped(FormatRawSinkImpl raw_sink,
-                   UntypedFormatSpecImpl format,
+bool FormatUntyped(FormatRawSinkImpl raw_sink, UntypedFormatSpecImpl format,
                    absl::Span<const FormatArgImpl> args);
 
 std::string& AppendPack(std::string* out, UntypedFormatSpecImpl format,
@@ -185,13 +216,14 @@
 template <typename T>
 class StreamedWrapper {
  public:
-  explicit StreamedWrapper(const T& v) : v_(v) { }
+  explicit StreamedWrapper(const T& v) : v_(v) {}
 
  private:
   template <typename S>
-  friend ArgConvertResult<FormatConversionCharSetInternal::s> FormatConvertImpl(
-      const StreamedWrapper<S>& v, FormatConversionSpecImpl conv,
-      FormatSinkImpl* out);
+  friend ArgConvertResult<FormatConversionCharSetUnion(
+      FormatConversionCharSetInternal::s, FormatConversionCharSetInternal::v)>
+  FormatConvertImpl(const StreamedWrapper<S>& v, FormatConversionSpecImpl conv,
+                    FormatSinkImpl* out);
   const T& v_;
 };
 
diff --git a/abseil-cpp/absl/strings/internal/str_format/bind_test.cc b/abseil-cpp/absl/strings/internal/str_format/bind_test.cc
index 64790a8..1eef9c4 100644
--- a/abseil-cpp/absl/strings/internal/str_format/bind_test.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/bind_test.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/internal/str_format/bind.h"
 
 #include <string.h>
diff --git a/abseil-cpp/absl/strings/internal/str_format/checker.h b/abseil-cpp/absl/strings/internal/str_format/checker.h
index 424c51f..eab6ab9 100644
--- a/abseil-cpp/absl/strings/internal/str_format/checker.h
+++ b/abseil-cpp/absl/strings/internal/str_format/checker.h
@@ -1,313 +1,94 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_
 #define ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_
 
+#include <algorithm>
+
 #include "absl/base/attributes.h"
 #include "absl/strings/internal/str_format/arg.h"
+#include "absl/strings/internal/str_format/constexpr_parser.h"
 #include "absl/strings/internal/str_format/extension.h"
 
 // Compile time check support for entry points.
 
 #ifndef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
-#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+// We disable format checker under vscode intellisense compilation.
+// See https://github.com/microsoft/vscode-cpptools/issues/3683 for
+// more details.
+#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && \
+    !defined(__INTELLISENSE__)
 #define ABSL_INTERNAL_ENABLE_FORMAT_CHECKER 1
-#endif  // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+#endif  // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) &&
+        // !defined(__INTELLISENSE__)
 #endif  // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace str_format_internal {
 
-constexpr bool AllOf() { return true; }
-
-template <typename... T>
-constexpr bool AllOf(bool b, T... t) {
-  return b && AllOf(t...);
-}
-
 #ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
 
-constexpr bool ContainsChar(const char* chars, char c) {
-  return *chars == c || (*chars && ContainsChar(chars + 1, c));
-}
-
-// A constexpr compatible list of Convs.
-struct ConvList {
-  const FormatConversionCharSet* array;
-  int count;
-
-  // We do the bound check here to avoid having to do it on the callers.
-  // Returning an empty FormatConversionCharSet has the same effect as
-  // short circuiting because it will never match any conversion.
-  constexpr FormatConversionCharSet operator[](int i) const {
-    return i < count ? array[i] : FormatConversionCharSet{};
-  }
-
-  constexpr ConvList without_front() const {
-    return count != 0 ? ConvList{array + 1, count - 1} : *this;
-  }
-};
-
-template <size_t count>
-struct ConvListT {
-  // Make sure the array has size > 0.
-  FormatConversionCharSet list[count ? count : 1];
-};
-
-constexpr char GetChar(string_view str, size_t index) {
-  return index < str.size() ? str[index] : char{};
-}
-
-constexpr string_view ConsumeFront(string_view str, size_t len = 1) {
-  return len <= str.size() ? string_view(str.data() + len, str.size() - len)
-                           : string_view();
-}
-
-constexpr string_view ConsumeAnyOf(string_view format, const char* chars) {
-  return ContainsChar(chars, GetChar(format, 0))
-             ? ConsumeAnyOf(ConsumeFront(format), chars)
-             : format;
-}
-
-constexpr bool IsDigit(char c) { return c >= '0' && c <= '9'; }
-
-// Helper class for the ParseDigits function.
-// It encapsulates the two return values we need there.
-struct Integer {
-  string_view format;
-  int value;
-
-  // If the next character is a '$', consume it.
-  // Otherwise, make `this` an invalid positional argument.
-  constexpr Integer ConsumePositionalDollar() const {
-    return GetChar(format, 0) == '$' ? Integer{ConsumeFront(format), value}
-                                     : Integer{format, 0};
-  }
-};
-
-constexpr Integer ParseDigits(string_view format, int value = 0) {
-  return IsDigit(GetChar(format, 0))
-             ? ParseDigits(ConsumeFront(format),
-                           10 * value + GetChar(format, 0) - '0')
-             : Integer{format, value};
-}
-
-// Parse digits for a positional argument.
-// The parsing also consumes the '$'.
-constexpr Integer ParsePositional(string_view format) {
-  return ParseDigits(format).ConsumePositionalDollar();
-}
-
-// Parses a single conversion specifier.
-// See ConvParser::Run() for post conditions.
-class ConvParser {
-  constexpr ConvParser SetFormat(string_view format) const {
-    return ConvParser(format, args_, error_, arg_position_, is_positional_);
-  }
-
-  constexpr ConvParser SetArgs(ConvList args) const {
-    return ConvParser(format_, args, error_, arg_position_, is_positional_);
-  }
-
-  constexpr ConvParser SetError(bool error) const {
-    return ConvParser(format_, args_, error_ || error, arg_position_,
-                      is_positional_);
-  }
-
-  constexpr ConvParser SetArgPosition(int arg_position) const {
-    return ConvParser(format_, args_, error_, arg_position, is_positional_);
-  }
-
-  // Consumes the next arg and verifies that it matches `conv`.
-  // `error_` is set if there is no next arg or if it doesn't match `conv`.
-  constexpr ConvParser ConsumeNextArg(char conv) const {
-    return SetArgs(args_.without_front()).SetError(!Contains(args_[0], conv));
-  }
-
-  // Verify that positional argument `i.value` matches `conv`.
-  // `error_` is set if `i.value` is not a valid argument or if it doesn't
-  // match.
-  constexpr ConvParser VerifyPositional(Integer i, char conv) const {
-    return SetFormat(i.format).SetError(!Contains(args_[i.value - 1], conv));
-  }
-
-  // Parse the position of the arg and store it in `arg_position_`.
-  constexpr ConvParser ParseArgPosition(Integer arg) const {
-    return SetFormat(arg.format).SetArgPosition(arg.value);
-  }
-
-  // Consume the flags.
-  constexpr ConvParser ParseFlags() const {
-    return SetFormat(ConsumeAnyOf(format_, "-+ #0"));
-  }
-
-  // Consume the width.
-  // If it is '*', we verify that it matches `args_`. `error_` is set if it
-  // doesn't match.
-  constexpr ConvParser ParseWidth() const {
-    return IsDigit(GetChar(format_, 0))
-               ? SetFormat(ParseDigits(format_).format)
-               : GetChar(format_, 0) == '*'
-                     ? is_positional_
-                           ? VerifyPositional(
-                                 ParsePositional(ConsumeFront(format_)), '*')
-                           : SetFormat(ConsumeFront(format_))
-                                 .ConsumeNextArg('*')
-                     : *this;
-  }
-
-  // Consume the precision.
-  // If it is '*', we verify that it matches `args_`. `error_` is set if it
-  // doesn't match.
-  constexpr ConvParser ParsePrecision() const {
-    return GetChar(format_, 0) != '.'
-               ? *this
-               : GetChar(format_, 1) == '*'
-                     ? is_positional_
-                           ? VerifyPositional(
-                                 ParsePositional(ConsumeFront(format_, 2)), '*')
-                           : SetFormat(ConsumeFront(format_, 2))
-                                 .ConsumeNextArg('*')
-                     : SetFormat(ParseDigits(ConsumeFront(format_)).format);
-  }
-
-  // Consume the length characters.
-  constexpr ConvParser ParseLength() const {
-    return SetFormat(ConsumeAnyOf(format_, "lLhjztq"));
-  }
-
-  // Consume the conversion character and verify that it matches `args_`.
-  // `error_` is set if it doesn't match.
-  constexpr ConvParser ParseConversion() const {
-    return is_positional_
-               ? VerifyPositional({ConsumeFront(format_), arg_position_},
-                                  GetChar(format_, 0))
-               : ConsumeNextArg(GetChar(format_, 0))
-                     .SetFormat(ConsumeFront(format_));
-  }
-
-  constexpr ConvParser(string_view format, ConvList args, bool error,
-                       int arg_position, bool is_positional)
-      : format_(format),
-        args_(args),
-        error_(error),
-        arg_position_(arg_position),
-        is_positional_(is_positional) {}
-
- public:
-  constexpr ConvParser(string_view format, ConvList args, bool is_positional)
-      : format_(format),
-        args_(args),
-        error_(false),
-        arg_position_(0),
-        is_positional_(is_positional) {}
-
-  // Consume the whole conversion specifier.
-  // `format()` will be set to the character after the conversion character.
-  // `error()` will be set if any of the arguments do not match.
-  constexpr ConvParser Run() const {
-    return (is_positional_ ? ParseArgPosition(ParsePositional(format_)) : *this)
-        .ParseFlags()
-        .ParseWidth()
-        .ParsePrecision()
-        .ParseLength()
-        .ParseConversion();
-  }
-
-  constexpr string_view format() const { return format_; }
-  constexpr ConvList args() const { return args_; }
-  constexpr bool error() const { return error_; }
-  constexpr bool is_positional() const { return is_positional_; }
-
- private:
-  string_view format_;
-  // Current list of arguments. If we are not in positional mode we will consume
-  // from the front.
-  ConvList args_;
-  bool error_;
-  // Holds the argument position of the conversion character, if we are in
-  // positional mode. Otherwise, it is unspecified.
-  int arg_position_;
-  // Whether we are in positional mode.
-  // It changes the behavior of '*' and where to find the converted argument.
-  bool is_positional_;
-};
-
-// Parses a whole format expression.
-// See FormatParser::Run().
-class FormatParser {
-  static constexpr bool FoundPercent(string_view format) {
-    return format.empty() ||
-           (GetChar(format, 0) == '%' && GetChar(format, 1) != '%');
-  }
-
-  // We use an inner function to increase the recursion limit.
-  // The inner function consumes up to `limit` characters on every run.
-  // This increases the limit from 512 to ~512*limit.
-  static constexpr string_view ConsumeNonPercentInner(string_view format,
-                                                      int limit = 20) {
-    return FoundPercent(format) || !limit
-               ? format
-               : ConsumeNonPercentInner(
-                     ConsumeFront(format, GetChar(format, 0) == '%' &&
-                                                  GetChar(format, 1) == '%'
-                                              ? 2
-                                              : 1),
-                     limit - 1);
-  }
-
-  // Consume characters until the next conversion spec %.
-  // It skips %%.
-  static constexpr string_view ConsumeNonPercent(string_view format) {
-    return FoundPercent(format)
-               ? format
-               : ConsumeNonPercent(ConsumeNonPercentInner(format));
-  }
-
-  static constexpr bool IsPositional(string_view format) {
-    return IsDigit(GetChar(format, 0)) ? IsPositional(ConsumeFront(format))
-                                       : GetChar(format, 0) == '$';
-  }
-
-  constexpr bool RunImpl(bool is_positional) const {
-    // In non-positional mode we require all arguments to be consumed.
-    // In positional mode just reaching the end of the format without errors is
-    // enough.
-    return (format_.empty() && (is_positional || args_.count == 0)) ||
-           (!format_.empty() &&
-            ValidateArg(
-                ConvParser(ConsumeFront(format_), args_, is_positional).Run()));
-  }
-
-  constexpr bool ValidateArg(ConvParser conv) const {
-    return !conv.error() && FormatParser(conv.format(), conv.args())
-                                .RunImpl(conv.is_positional());
-  }
-
- public:
-  constexpr FormatParser(string_view format, ConvList args)
-      : format_(ConsumeNonPercent(format)), args_(args) {}
-
-  // Runs the parser for `format` and `args`.
-  // It verifies that the format is valid and that all conversion specifiers
-  // match the arguments passed.
-  // In non-positional mode it also verfies that all arguments are consumed.
-  constexpr bool Run() const {
-    return RunImpl(!format_.empty() && IsPositional(ConsumeFront(format_)));
-  }
-
- private:
-  string_view format_;
-  // Current list of arguments.
-  // If we are not in positional mode we will consume from the front and will
-  // have to be empty in the end.
-  ConvList args_;
-};
-
 template <FormatConversionCharSet... C>
 constexpr bool ValidFormatImpl(string_view format) {
-  return FormatParser(format,
-                      {ConvListT<sizeof...(C)>{{C...}}.list, sizeof...(C)})
-      .Run();
+  int next_arg = 0;
+  const char* p = format.data();
+  const char* const end = p + format.size();
+  constexpr FormatConversionCharSet
+      kAllowedConvs[(std::max)(sizeof...(C), size_t{1})] = {C...};
+  bool used[(std::max)(sizeof...(C), size_t{1})]{};
+  constexpr int kNumArgs = sizeof...(C);
+  while (p != end) {
+    while (p != end && *p != '%') ++p;
+    if (p == end) {
+      break;
+    }
+    if (p + 1 >= end) return false;
+    if (p[1] == '%') {
+      // %%
+      p += 2;
+      continue;
+    }
+
+    UnboundConversion conv(absl::kConstInit);
+    p = ConsumeUnboundConversion(p + 1, end, &conv, &next_arg);
+    if (p == nullptr) return false;
+    if (conv.arg_position <= 0 || conv.arg_position > kNumArgs) {
+      return false;
+    }
+    if (!Contains(kAllowedConvs[conv.arg_position - 1], conv.conv)) {
+      return false;
+    }
+    used[conv.arg_position - 1] = true;
+    for (auto extra : {conv.width, conv.precision}) {
+      if (extra.is_from_arg()) {
+        int pos = extra.get_from_arg();
+        if (pos <= 0 || pos > kNumArgs) return false;
+        used[pos - 1] = true;
+        if (!Contains(kAllowedConvs[pos - 1], '*')) {
+          return false;
+        }
+      }
+    }
+  }
+  if (sizeof...(C) != 0) {
+    for (bool b : used) {
+      if (!b) return false;
+    }
+  }
+  return true;
 }
 
 #endif  // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
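The old single-expression, recursion-heavy ConvParser/FormatParser pair is replaced by a straight imperative loop: ValidFormatImpl now walks the format with ConsumeUnboundConversion (from the new constexpr_parser.h) and tracks which arguments were consumed, which C++14 relaxed constexpr permits. A hedged sketch of how the rewritten checker behaves; calling the internal helper directly like this is for illustration only, since the real entry point is the enable_if check on FormatSpec:

#include "absl/strings/internal/str_format/checker.h"

namespace sfi = absl::str_format_internal;

#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
// One int-compatible argument: "%d" matches, "%s" does not.
static_assert(sfi::ValidFormatImpl<absl::FormatConversionCharSet::d>("%d"),
              "an int argument satisfies %d");
static_assert(!sfi::ValidFormatImpl<absl::FormatConversionCharSet::d>("%s"),
              "an int argument does not satisfy %s");
#endif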
diff --git a/abseil-cpp/absl/strings/internal/str_format/checker_test.cc b/abseil-cpp/absl/strings/internal/str_format/checker_test.cc
index a76d70b..a86bed3 100644
--- a/abseil-cpp/absl/strings/internal/str_format/checker_test.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/checker_test.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include <string>
 
 #include "gmock/gmock.h"
@@ -25,16 +39,16 @@
 
 TEST(StrFormatChecker, ArgumentToConv) {
   FormatConversionCharSet conv = ArgumentToConv<std::string>();
-  EXPECT_EQ(ConvToString(conv), "s");
+  EXPECT_EQ(ConvToString(conv), "sv");
 
   conv = ArgumentToConv<const char*>();
   EXPECT_EQ(ConvToString(conv), "sp");
 
   conv = ArgumentToConv<double>();
-  EXPECT_EQ(ConvToString(conv), "fFeEgGaA");
+  EXPECT_EQ(ConvToString(conv), "fFeEgGaAv");
 
   conv = ArgumentToConv<int>();
-  EXPECT_EQ(ConvToString(conv), "cdiouxXfFeEgGaA*");
+  EXPECT_EQ(ConvToString(conv), "cdiouxXfFeEgGaAv*");
 
   conv = ArgumentToConv<std::string*>();
   EXPECT_EQ(ConvToString(conv), "p");
@@ -79,6 +93,7 @@
       ValidFormat<void (*)(), volatile int*>("%p %p"),  //
       ValidFormat<string_view, const char*, double, void*>(
           "string_view=%s const char*=%s double=%f void*=%p)"),
+      ValidFormat<int>("%v"),  //
 
       ValidFormat<int>("%% %1$d"),               //
       ValidFormat<int>("%1$ld"),                 //
@@ -95,7 +110,9 @@
       ValidFormat<int, double>("%2$.*1$f"),      //
       ValidFormat<void*, string_view, const char*, double>(
           "string_view=%2$s const char*=%3$s double=%4$f void*=%1$p "
-          "repeat=%3$s)")};
+          "repeat=%3$s)"),
+      ValidFormat<std::string>("%1$v"),
+  };
 
   for (Case c : trues) {
     EXPECT_TRUE(c.result) << c.format;
@@ -116,6 +133,8 @@
       ValidFormat<int>("%*d"),               //
       ValidFormat<std::string>("%p"),        //
       ValidFormat<int (*)(int)>("%d"),       //
+      ValidFormat<int>("%1v"),               //
+      ValidFormat<int>("%.1v"),              //
 
       ValidFormat<>("%3$d"),                     //
       ValidFormat<>("%1$r"),                     //
@@ -124,13 +143,14 @@
       ValidFormat<int>("%1$*2$1d"),              //
       ValidFormat<int>("%1$1-d"),                //
       ValidFormat<std::string, int>("%2$*1$s"),  //
-      ValidFormat<std::string>("%1$p"),
+      ValidFormat<std::string>("%1$p"),          //
+      ValidFormat<int>("%1$*2$v"),               //
 
       ValidFormat<int, int>("%d %2$d"),  //
   };
 
   for (Case c : falses) {
-    EXPECT_FALSE(c.result) << c.format;
+    EXPECT_FALSE(c.result) << "format<" << c.format << ">";
   }
 }
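The new cases above exercise the %v conversion: it formats an argument's natural representation, works with positional references, and rejects width/precision modifiers. A hedged usage sketch of the public surface (the output comments reflect expected behavior, not something asserted by this test file):

#include <string>
#include "absl/strings/str_format.h"

std::string Demo() {
  std::string a = absl::StrFormat("%v", 42);                 // "42"
  std::string b = absl::StrFormat("%v", std::string("hi"));  // "hi"
  // absl::StrFormat("%.1v", 42) would be rejected at compile time by the
  // checker, matching the new entries in the `falses` list above.
  return a + b;
}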
 
diff --git a/abseil-cpp/absl/strings/internal/str_format/constexpr_parser.h b/abseil-cpp/absl/strings/internal/str_format/constexpr_parser.h
new file mode 100644
index 0000000..b70a16e
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/str_format/constexpr_parser.h
@@ -0,0 +1,352 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_CONSTEXPR_PARSER_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_CONSTEXPR_PARSER_H_
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+#include "absl/base/const_init.h"
+#include "absl/strings/internal/str_format/extension.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+enum class LengthMod : std::uint8_t { h, hh, l, ll, L, j, z, t, q, none };
+
+// The analyzed properties of a single specified conversion.
+struct UnboundConversion {
+  // This is a user defined default constructor on purpose to skip the
+  // initialization of parts of the object that are not necessary.
+  UnboundConversion() {}  // NOLINT
+
+  // This constructor is provided for the static checker. We don't want to do
+  // the unnecessary initialization in the normal case.
+  explicit constexpr UnboundConversion(absl::ConstInitType)
+      : arg_position{}, width{}, precision{} {}
+
+  class InputValue {
+   public:
+    constexpr void set_value(int value) {
+      assert(value >= 0);
+      value_ = value;
+    }
+    constexpr int value() const { return value_; }
+
+    // Marks the value as "from arg", i.e. the '*' format.
+    // Requires `value >= 1`.
+    // When set, is_from_arg() returns true and get_from_arg() returns the
+    // original value.
+    // `value()`'s return value is unspecified in this state.
+    constexpr void set_from_arg(int value) {
+      assert(value > 0);
+      value_ = -value - 1;
+    }
+    constexpr bool is_from_arg() const { return value_ < -1; }
+    constexpr int get_from_arg() const {
+      assert(is_from_arg());
+      return -value_ - 1;
+    }
+
+   private:
+    int value_ = -1;
+  };
+
+  // No need to initialize. It will always be set in the parser.
+  int arg_position;
+
+  InputValue width;
+  InputValue precision;
+
+  Flags flags = Flags::kBasic;
+  LengthMod length_mod = LengthMod::none;
+  FormatConversionChar conv = FormatConversionCharInternal::kNone;
+};
+
+// Helper tag class for the table below.
+// It allows fast `char -> ConversionChar/LengthMod/Flags` checking and
+// conversions.
+class ConvTag {
+ public:
+  constexpr ConvTag(FormatConversionChar conversion_char)  // NOLINT
+      : tag_(static_cast<uint8_t>(conversion_char)) {}
+  constexpr ConvTag(LengthMod length_mod)  // NOLINT
+      : tag_(0x80 | static_cast<uint8_t>(length_mod)) {}
+  constexpr ConvTag(Flags flags)  // NOLINT
+      : tag_(0xc0 | static_cast<uint8_t>(flags)) {}
+  constexpr ConvTag() : tag_(0xFF) {}
+
+  constexpr bool is_conv() const { return (tag_ & 0x80) == 0; }
+  constexpr bool is_length() const { return (tag_ & 0xC0) == 0x80; }
+  constexpr bool is_flags() const { return (tag_ & 0xE0) == 0xC0; }
+
+  constexpr FormatConversionChar as_conv() const {
+    assert(is_conv());
+    assert(!is_length());
+    assert(!is_flags());
+    return static_cast<FormatConversionChar>(tag_);
+  }
+  constexpr LengthMod as_length() const {
+    assert(!is_conv());
+    assert(is_length());
+    assert(!is_flags());
+    return static_cast<LengthMod>(tag_ & 0x3F);
+  }
+  constexpr Flags as_flags() const {
+    assert(!is_conv());
+    assert(!is_length());
+    assert(is_flags());
+    return static_cast<Flags>(tag_ & 0x1F);
+  }
+
+ private:
+  uint8_t tag_;
+};
+
+struct ConvTagHolder {
+  using CC = FormatConversionCharInternal;
+  using LM = LengthMod;
+
+  // Abbreviations to fit in the table below.
+  static constexpr auto kFSign = Flags::kSignCol;
+  static constexpr auto kFAlt = Flags::kAlt;
+  static constexpr auto kFPos = Flags::kShowPos;
+  static constexpr auto kFLeft = Flags::kLeft;
+  static constexpr auto kFZero = Flags::kZero;
+
+  static constexpr ConvTag value[256] = {
+      {},     {},    {},    {},    {},    {},     {},    {},     // 00-07
+      {},     {},    {},    {},    {},    {},     {},    {},     // 08-0f
+      {},     {},    {},    {},    {},    {},     {},    {},     // 10-17
+      {},     {},    {},    {},    {},    {},     {},    {},     // 18-1f
+      kFSign, {},    {},    kFAlt, {},    {},     {},    {},     //  !"#$%&'
+      {},     {},    {},    kFPos, {},    kFLeft, {},    {},     // ()*+,-./
+      kFZero, {},    {},    {},    {},    {},     {},    {},     // 01234567
+      {},     {},    {},    {},    {},    {},     {},    {},     // 89:;<=>?
+      {},     CC::A, {},    {},    {},    CC::E,  CC::F, CC::G,  // @ABCDEFG
+      {},     {},    {},    {},    LM::L, {},     {},    {},     // HIJKLMNO
+      {},     {},    {},    {},    {},    {},     {},    {},     // PQRSTUVW
+      CC::X,  {},    {},    {},    {},    {},     {},    {},     // XYZ[\]^_
+      {},     CC::a, {},    CC::c, CC::d, CC::e,  CC::f, CC::g,  // `abcdefg
+      LM::h,  CC::i, LM::j, {},    LM::l, {},     CC::n, CC::o,  // hijklmno
+      CC::p,  LM::q, {},    CC::s, LM::t, CC::u,  CC::v, {},     // pqrstuvw
+      CC::x,  {},    LM::z, {},    {},    {},     {},    {},     // xyz{|}!
+      {},     {},    {},    {},    {},    {},     {},    {},     // 80-87
+      {},     {},    {},    {},    {},    {},     {},    {},     // 88-8f
+      {},     {},    {},    {},    {},    {},     {},    {},     // 90-97
+      {},     {},    {},    {},    {},    {},     {},    {},     // 98-9f
+      {},     {},    {},    {},    {},    {},     {},    {},     // a0-a7
+      {},     {},    {},    {},    {},    {},     {},    {},     // a8-af
+      {},     {},    {},    {},    {},    {},     {},    {},     // b0-b7
+      {},     {},    {},    {},    {},    {},     {},    {},     // b8-bf
+      {},     {},    {},    {},    {},    {},     {},    {},     // c0-c7
+      {},     {},    {},    {},    {},    {},     {},    {},     // c8-cf
+      {},     {},    {},    {},    {},    {},     {},    {},     // d0-d7
+      {},     {},    {},    {},    {},    {},     {},    {},     // d8-df
+      {},     {},    {},    {},    {},    {},     {},    {},     // e0-e7
+      {},     {},    {},    {},    {},    {},     {},    {},     // e8-ef
+      {},     {},    {},    {},    {},    {},     {},    {},     // f0-f7
+      {},     {},    {},    {},    {},    {},     {},    {},     // f8-ff
+  };
+};
+
+// Keep a single table for all the conversion chars and length modifiers.
+constexpr ConvTag GetTagForChar(char c) {
+  return ConvTagHolder::value[static_cast<unsigned char>(c)];
+}
+
+constexpr bool CheckFastPathSetting(const UnboundConversion& conv) {
+  bool width_precision_needed =
+      conv.width.value() >= 0 || conv.precision.value() >= 0;
+  if (width_precision_needed && conv.flags == Flags::kBasic) {
+#if defined(__clang__)
+    // Some compilers complain about this in constexpr even when not executed,
+    // so only enable the error dump in clang.
+    fprintf(stderr,
+            "basic=%d left=%d show_pos=%d sign_col=%d alt=%d zero=%d "
+            "width=%d precision=%d\n",
+            conv.flags == Flags::kBasic ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kLeft) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kShowPos) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kSignCol) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kAlt) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kZero) ? 1 : 0, conv.width.value(),
+            conv.precision.value());
+#endif  // defined(__clang__)
+    return false;
+  }
+  return true;
+}
+
+constexpr int ParseDigits(char& c, const char*& pos, const char* const end) {
+  int digits = c - '0';
+  // We do not want to overflow `digits` so we consume at most digits10
+  // digits. If there are more digits the parsing will fail later on when the
+  // digit doesn't match the expected characters.
+  int num_digits = std::numeric_limits<int>::digits10;
+  for (;;) {
+    if (ABSL_PREDICT_FALSE(pos == end)) break;
+    c = *pos++;
+    if ('0' > c || c > '9') break;
+    --num_digits;
+    if (ABSL_PREDICT_FALSE(!num_digits)) break;
+    digits = 10 * digits + c - '0';
+  }
+  return digits;
+}
+
+template <bool is_positional>
+constexpr const char* ConsumeConversion(const char* pos, const char* const end,
+                                        UnboundConversion* conv,
+                                        int* next_arg) {
+  const char* const original_pos = pos;
+  char c = 0;
+  // Read the next char into `c` and update `pos`. Returns false if there are
+  // no more chars to read.
+#define ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR()          \
+  do {                                                  \
+    if (ABSL_PREDICT_FALSE(pos == end)) return nullptr; \
+    c = *pos++;                                         \
+  } while (0)
+
+  if (is_positional) {
+    ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+    if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
+    conv->arg_position = ParseDigits(c, pos, end);
+    assert(conv->arg_position > 0);
+    if (ABSL_PREDICT_FALSE(c != '$')) return nullptr;
+  }
+
+  ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+
+  // We should start with the basic flag on.
+  assert(conv->flags == Flags::kBasic);
+
+  // Any non alpha character makes this conversion not basic.
+  // This includes flags (-+ #0), width (1-9, *) or precision (.).
+  // All conversion characters and length modifiers are alpha characters.
+  if (c < 'A') {
+    while (c <= '0') {
+      auto tag = GetTagForChar(c);
+      if (tag.is_flags()) {
+        conv->flags = conv->flags | tag.as_flags();
+        ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+      } else {
+        break;
+      }
+    }
+
+    if (c <= '9') {
+      if (c >= '0') {
+        int maybe_width = ParseDigits(c, pos, end);
+        if (!is_positional && c == '$') {
+          if (ABSL_PREDICT_FALSE(*next_arg != 0)) return nullptr;
+          // Positional conversion.
+          *next_arg = -1;
+          return ConsumeConversion<true>(original_pos, end, conv, next_arg);
+        }
+        conv->flags = conv->flags | Flags::kNonBasic;
+        conv->width.set_value(maybe_width);
+      } else if (c == '*') {
+        conv->flags = conv->flags | Flags::kNonBasic;
+        ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+        if (is_positional) {
+          if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
+          conv->width.set_from_arg(ParseDigits(c, pos, end));
+          if (ABSL_PREDICT_FALSE(c != '$')) return nullptr;
+          ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+        } else {
+          conv->width.set_from_arg(++*next_arg);
+        }
+      }
+    }
+
+    if (c == '.') {
+      conv->flags = conv->flags | Flags::kNonBasic;
+      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+      if ('0' <= c && c <= '9') {
+        conv->precision.set_value(ParseDigits(c, pos, end));
+      } else if (c == '*') {
+        ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+        if (is_positional) {
+          if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
+          conv->precision.set_from_arg(ParseDigits(c, pos, end));
+          if (c != '$') return nullptr;
+          ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+        } else {
+          conv->precision.set_from_arg(++*next_arg);
+        }
+      } else {
+        conv->precision.set_value(0);
+      }
+    }
+  }
+
+  auto tag = GetTagForChar(c);
+
+  if (ABSL_PREDICT_FALSE(c == 'v' && conv->flags != Flags::kBasic)) {
+    return nullptr;
+  }
+
+  if (ABSL_PREDICT_FALSE(!tag.is_conv())) {
+    if (ABSL_PREDICT_FALSE(!tag.is_length())) return nullptr;
+
+    // It is a length modifier.
+    using str_format_internal::LengthMod;
+    LengthMod length_mod = tag.as_length();
+    ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+    if (c == 'h' && length_mod == LengthMod::h) {
+      conv->length_mod = LengthMod::hh;
+      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+    } else if (c == 'l' && length_mod == LengthMod::l) {
+      conv->length_mod = LengthMod::ll;
+      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+    } else {
+      conv->length_mod = length_mod;
+    }
+    tag = GetTagForChar(c);
+
+    if (ABSL_PREDICT_FALSE(c == 'v')) return nullptr;
+    if (ABSL_PREDICT_FALSE(!tag.is_conv())) return nullptr;
+  }
+#undef ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR
+
+  assert(CheckFastPathSetting(*conv));
+  (void)(&CheckFastPathSetting);
+
+  conv->conv = tag.as_conv();
+  if (!is_positional) conv->arg_position = ++*next_arg;
+  return pos;
+}
+
+// Consume conversion spec prefix (not including '%') of [p, end) if valid.
+// Examples of valid specs would be e.g.: "s", "d", "-12.6f".
+// If valid, it returns the first character following the conversion spec,
+// and the spec part is broken down and returned in 'conv'.
+// If invalid, returns nullptr.
+constexpr const char* ConsumeUnboundConversion(const char* p, const char* end,
+                                               UnboundConversion* conv,
+                                               int* next_arg) {
+  if (*next_arg < 0) return ConsumeConversion<true>(p, end, conv, next_arg);
+  return ConsumeConversion<false>(p, end, conv, next_arg);
+}
+
+}  // namespace str_format_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_STR_FORMAT_CONSTEXPR_PARSER_H_
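To make the new parser's contract concrete, here is a hedged, illustration-only use of ConsumeUnboundConversion on the spec body "-12.6f" (everything after the '%'). The function and types are internal API; the example only spells out what the UnboundConversion ends up holding, and ParseDemo/kSpec are made-up names:

#include <cassert>
#include "absl/strings/internal/str_format/constexpr_parser.h"

void ParseDemo() {
  namespace sfi = absl::str_format_internal;
  constexpr char kSpec[] = "-12.6f";  // flag '-', width 12, precision 6, conv 'f'
  sfi::UnboundConversion conv(absl::kConstInit);
  int next_arg = 0;
  const char* rest = sfi::ConsumeUnboundConversion(
      kSpec, kSpec + sizeof(kSpec) - 1, &conv, &next_arg);
  assert(rest == kSpec + sizeof(kSpec) - 1);  // consumed the whole spec
  assert(conv.width.value() == 12);
  assert(conv.precision.value() == 6);
  assert(sfi::FlagsContains(conv.flags, sfi::Flags::kLeft));
  assert(conv.arg_position == 1);  // non-positional: args are numbered in order
  (void)rest;
}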
diff --git a/abseil-cpp/absl/strings/internal/str_format/convert_test.cc b/abseil-cpp/absl/strings/internal/str_format/convert_test.cc
index 634ee78..16ff987 100644
--- a/abseil-cpp/absl/strings/internal/str_format/convert_test.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/convert_test.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include <errno.h>
 #include <stdarg.h>
 #include <stdio.h>
@@ -10,7 +24,9 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/base/attributes.h"
 #include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/strings/internal/str_format/bind.h"
 #include "absl/strings/match.h"
 #include "absl/types/optional.h"
@@ -110,6 +126,7 @@
   delete[] buf;
 }
 
+void StrAppend(std::string *, const char *, ...) ABSL_PRINTF_ATTRIBUTE(2, 3);
 void StrAppend(std::string *out, const char *format, ...) {
   va_list ap;
   va_start(ap, format);
@@ -117,6 +134,7 @@
   va_end(ap);
 }
 
+std::string StrPrint(const char *, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);
 std::string StrPrint(const char *format, ...) {
   va_list ap;
   va_start(ap, format);
@@ -215,6 +233,9 @@
   TestStringConvert(static_cast<const char*>("hello"));
   TestStringConvert(std::string("hello"));
   TestStringConvert(string_view("hello"));
+#if defined(ABSL_HAVE_STD_STRING_VIEW)
+  TestStringConvert(std::string_view("hello"));
+#endif  // ABSL_HAVE_STD_STRING_VIEW
 }
 
 TEST_F(FormatConvertTest, NullString) {
@@ -244,7 +265,7 @@
   }
   void* parsed = nullptr;
   if (sscanf(arg.c_str(), "%p", &parsed) != 1) {
-    ABSL_RAW_LOG(FATAL, "Could not parse %s", arg.c_str());
+    LOG(FATAL) << "Could not parse " << arg;
   }
   return ptr == parsed;
 }
@@ -438,25 +459,36 @@
 }
 
 TYPED_TEST_P(TypedFormatConvertTest, Char) {
+  // Pass a bunch of values of type TypeParam to both FormatPack and libc's
+  // vsnprintf("%c", ...) (wrapped in StrPrint) to make sure we get the same
+  // value.
   typedef TypeParam T;
   using remove_volatile_t = typename std::remove_volatile<T>::type;
-  static const T kMin = std::numeric_limits<remove_volatile_t>::min();
-  static const T kMax = std::numeric_limits<remove_volatile_t>::max();
-  T kVals[] = {
-    remove_volatile_t(1), remove_volatile_t(2), remove_volatile_t(10),
-    remove_volatile_t(-1), remove_volatile_t(-2), remove_volatile_t(-10),
-    remove_volatile_t(0),
-    kMin + remove_volatile_t(1), kMin,
-    kMax - remove_volatile_t(1), kMax
+  std::vector<remove_volatile_t> vals = {
+      remove_volatile_t(1),  remove_volatile_t(2),  remove_volatile_t(10),   //
+      remove_volatile_t(-1), remove_volatile_t(-2), remove_volatile_t(-10),  //
+      remove_volatile_t(0),
   };
-  for (const T &c : kVals) {
+
+  // We'd like to test values near std::numeric_limits::min() and
+  // std::numeric_limits::max(), too, but vsnprintf("%c", ...) can't handle
+  // anything larger than an int. Add in the most extreme values we can without
+  // exceeding that range.
+  static const T kMin =
+      static_cast<remove_volatile_t>(std::numeric_limits<int>::min());
+  static const T kMax =
+      static_cast<remove_volatile_t>(std::numeric_limits<int>::max());
+  vals.insert(vals.end(), {kMin + 1, kMin, kMax - 1, kMax});
+
+  for (const T c : vals) {
     const FormatArgImpl args[] = {FormatArgImpl(c)};
     UntypedFormatSpecImpl format("%c");
-    EXPECT_EQ(StrPrint("%c", c), FormatPack(format, absl::MakeSpan(args)));
+    EXPECT_EQ(StrPrint("%c", static_cast<int>(c)),
+              FormatPack(format, absl::MakeSpan(args)));
   }
 }
 
-REGISTER_TYPED_TEST_CASE_P(TypedFormatConvertTest, AllIntsWithFlags, Char);
+REGISTER_TYPED_TEST_SUITE_P(TypedFormatConvertTest, AllIntsWithFlags, Char);
 
 typedef ::testing::Types<
     int, unsigned, volatile int,
@@ -465,8 +497,8 @@
     long long, unsigned long long,
     signed char, unsigned char, char>
     AllIntTypes;
-INSTANTIATE_TYPED_TEST_CASE_P(TypedFormatConvertTestWithAllIntTypes,
-                              TypedFormatConvertTest, AllIntTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(TypedFormatConvertTestWithAllIntTypes,
+                               TypedFormatConvertTest, AllIntTypes);
 TEST_F(FormatConvertTest, VectorBool) {
   // Make sure vector<bool>'s values behave as bools.
   std::vector<bool> v = {true, false};
@@ -540,7 +572,8 @@
 }
 
 template <typename Floating>
-void TestWithMultipleFormatsHelper(const std::vector<Floating> &floats) {
+void TestWithMultipleFormatsHelper(const std::vector<Floating> &floats,
+                                   const std::set<Floating> &skip_verify) {
   const NativePrintfTraits &native_traits = VerifyNativeImplementation();
   // Reserve the space to ensure we don't allocate memory in the output itself.
   std::string str_format_result;
@@ -588,7 +621,16 @@
           AppendPack(&str_format_result, format, absl::MakeSpan(args));
         }
 
-        if (string_printf_result != str_format_result) {
+#ifdef _MSC_VER
+        // MSVC has a different rounding policy than us so we can't test our
+        // implementation against the native one there.
+        continue;
+#elif defined(__APPLE__)
+        // Apple formats NaN differently (+nan) vs. (nan)
+        if (std::isnan(d)) continue;
+#endif
+        if (string_printf_result != str_format_result &&
+            skip_verify.find(d) == skip_verify.end()) {
           // We use ASSERT_EQ here because failures are usually correlated and a
           // bug would print way too many failed expectations causing the test
           // to time out.
@@ -602,12 +644,6 @@
 }
 
 TEST_F(FormatConvertTest, Float) {
-#ifdef _MSC_VER
-  // MSVC has a different rounding policy than us so we can't test our
-  // implementation against the native one there.
-  return;
-#endif  // _MSC_VER
-
   std::vector<float> floats = {0.0f,
                                -0.0f,
                                .9999999f,
@@ -621,7 +657,8 @@
                                std::numeric_limits<float>::epsilon(),
                                std::numeric_limits<float>::epsilon() + 1.0f,
                                std::numeric_limits<float>::infinity(),
-                               -std::numeric_limits<float>::infinity()};
+                               -std::numeric_limits<float>::infinity(),
+                               std::nanf("")};
 
   // Some regression tests.
   floats.push_back(0.999999989f);
@@ -650,21 +687,14 @@
   std::sort(floats.begin(), floats.end());
   floats.erase(std::unique(floats.begin(), floats.end()), floats.end());
 
-#ifndef __APPLE__
-  // Apple formats NaN differently (+nan) vs. (nan)
-  floats.push_back(std::nan(""));
-#endif
-
-  TestWithMultipleFormatsHelper(floats);
+  TestWithMultipleFormatsHelper(floats, {});
 }
 
 TEST_F(FormatConvertTest, Double) {
-#ifdef _MSC_VER
-  // MSVC has a different rounding policy than us so we can't test our
-  // implementation against the native one there.
-  return;
-#endif  // _MSC_VER
-
+  // For values that we know won't match the standard library implementation we
+  // skip verification, but still run the algorithm to catch asserts/sanitizer
+  // bugs.
+  std::set<double> skip_verify;
   std::vector<double> doubles = {0.0,
                                  -0.0,
                                  .99999999999999,
@@ -678,7 +708,8 @@
                                  std::numeric_limits<double>::epsilon(),
                                  std::numeric_limits<double>::epsilon() + 1,
                                  std::numeric_limits<double>::infinity(),
-                                 -std::numeric_limits<double>::infinity()};
+                                 -std::numeric_limits<double>::infinity(),
+                                 std::nan("")};
 
   // Some regression tests.
   doubles.push_back(0.99999999999999989);
@@ -708,33 +739,29 @@
       "5084551339423045832369032229481658085593321233482747978262041447231"
       "68738177180919299881250404026184124858368.000000";
 
-  if (!gcc_bug_22142) {
-    for (int exp = -300; exp <= 300; ++exp) {
-      const double all_ones_mantissa = 0x1fffffffffffff;
-      doubles.push_back(std::ldexp(all_ones_mantissa, exp));
+  for (int exp = -300; exp <= 300; ++exp) {
+    const double all_ones_mantissa = 0x1fffffffffffff;
+    doubles.push_back(std::ldexp(all_ones_mantissa, exp));
+    if (gcc_bug_22142) {
+      skip_verify.insert(doubles.back());
     }
   }
 
   if (gcc_bug_22142) {
-    for (auto &d : doubles) {
-      using L = std::numeric_limits<double>;
-      double d2 = std::abs(d);
-      if (d2 == L::max() || d2 == L::min() || d2 == L::denorm_min()) {
-        d = 0;
-      }
-    }
+    using L = std::numeric_limits<double>;
+    skip_verify.insert(L::max());
+    skip_verify.insert(L::min());  // NOLINT
+    skip_verify.insert(L::denorm_min());
+    skip_verify.insert(-L::max());
+    skip_verify.insert(-L::min());  // NOLINT
+    skip_verify.insert(-L::denorm_min());
   }
 
   // Remove duplicates to speed up the logic below.
   std::sort(doubles.begin(), doubles.end());
   doubles.erase(std::unique(doubles.begin(), doubles.end()), doubles.end());
 
-#ifndef __APPLE__
-  // Apple formats NaN differently (+nan) vs. (nan)
-  doubles.push_back(std::nan(""));
-#endif
-
-  TestWithMultipleFormatsHelper(doubles);
+  TestWithMultipleFormatsHelper(doubles, skip_verify);
 }
 
 TEST_F(FormatConvertTest, DoubleRound) {
@@ -1055,11 +1082,6 @@
 }
 
 TEST_F(FormatConvertTest, LongDouble) {
-#ifdef _MSC_VER
-  // MSVC has a different rounding policy than us so we can't test our
-  // implementation against the native one there.
-  return;
-#endif  // _MSC_VER
   const NativePrintfTraits &native_traits = VerifyNativeImplementation();
   const char *const kFormats[] = {"%",    "%.3", "%8.5", "%9",  "%.5000",
                                   "%.60", "%+",  "% ",   "%-10"};
@@ -1120,10 +1142,18 @@
       for (auto d : doubles) {
         FormatArgImpl arg(d);
         UntypedFormatSpecImpl format(fmt_str);
+        std::string result = FormatPack(format, {&arg, 1});
+
+#ifdef _MSC_VER
+        // MSVC has a different rounding policy than us so we can't test our
+        // implementation against the native one there.
+        continue;
+#endif  // _MSC_VER
+
         // We use ASSERT_EQ here because failures are usually correlated and a
         // bug would print way too many failed expectations causing the test to
         // time out.
-        ASSERT_EQ(StrPrint(fmt_str.c_str(), d), FormatPack(format, {&arg, 1}))
+        ASSERT_EQ(StrPrint(fmt_str.c_str(), d), result)
             << fmt_str << " " << StrPrint("%.18Lg", d) << " "
             << StrPrint("%La", d) << " " << StrPrint("%.1080Lf", d);
       }
@@ -1212,9 +1242,9 @@
   const NativePrintfTraits &native_traits = VerifyNativeImplementation();
   // If one of the following tests break then it is either because the above PP
   // macro guards failed to exclude a new platform (likely) or because something
-  // has changed in the implemention of glibc sprintf float formatting behavior.
-  // If the latter, then the code that computes these flags needs to be
-  // revisited and/or possibly the StrFormat implementation.
+  // has changed in the implementation of glibc sprintf float formatting
+  // behavior.  If the latter, then the code that computes these flags needs to
+  // be revisited and/or possibly the StrFormat implementation.
   EXPECT_TRUE(native_traits.hex_float_has_glibc_rounding);
   EXPECT_TRUE(native_traits.hex_float_prefers_denormal_repr);
   EXPECT_TRUE(
diff --git a/abseil-cpp/absl/strings/internal/str_format/extension.cc b/abseil-cpp/absl/strings/internal/str_format/extension.cc
index bb0d96c..2a0ceb1 100644
--- a/abseil-cpp/absl/strings/internal/str_format/extension.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/extension.cc
@@ -23,16 +23,18 @@
 ABSL_NAMESPACE_BEGIN
 namespace str_format_internal {
 
-std::string Flags::ToString() const {
+std::string FlagsToString(Flags v) {
   std::string s;
-  s.append(left     ? "-" : "");
-  s.append(show_pos ? "+" : "");
-  s.append(sign_col ? " " : "");
-  s.append(alt      ? "#" : "");
-  s.append(zero     ? "0" : "");
+  s.append(FlagsContains(v, Flags::kLeft) ? "-" : "");
+  s.append(FlagsContains(v, Flags::kShowPos) ? "+" : "");
+  s.append(FlagsContains(v, Flags::kSignCol) ? " " : "");
+  s.append(FlagsContains(v, Flags::kAlt) ? "#" : "");
+  s.append(FlagsContains(v, Flags::kZero) ? "0" : "");
   return s;
 }
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
 #define ABSL_INTERNAL_X_VAL(id) \
   constexpr absl::FormatConversionChar FormatConversionCharInternal::id;
 ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
@@ -45,21 +47,19 @@
 ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
 #undef ABSL_INTERNAL_CHAR_SET_CASE
 
-// NOLINTNEXTLINE(readability-redundant-declaration)
 constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar;
-// NOLINTNEXTLINE(readability-redundant-declaration)
 constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral;
-// NOLINTNEXTLINE(readability-redundant-declaration)
 constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating;
-// NOLINTNEXTLINE(readability-redundant-declaration)
 constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric;
-// NOLINTNEXTLINE(readability-redundant-declaration)
 constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer;
 
+#endif  // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
 bool FormatSinkImpl::PutPaddedString(string_view value, int width,
                                      int precision, bool left) {
   size_t space_remaining = 0;
-  if (width >= 0) space_remaining = width;
+  if (width >= 0)
+    space_remaining = static_cast<size_t>(width);
   size_t n = value.size();
   if (precision >= 0) n = std::min(n, static_cast<size_t>(precision));
   string_view shown(value.data(), n);
diff --git a/abseil-cpp/absl/strings/internal/str_format/extension.h b/abseil-cpp/absl/strings/internal/str_format/extension.h
index a9b9e13..8de42d2 100644
--- a/abseil-cpp/absl/strings/internal/str_format/extension.h
+++ b/abseil-cpp/absl/strings/internal/str_format/extension.h
@@ -19,6 +19,7 @@
 #include <limits.h>
 
 #include <cstddef>
+#include <cstdint>
 #include <cstring>
 #include <ostream>
 
@@ -70,7 +71,7 @@
   ~FormatSinkImpl() { Flush(); }
 
   void Flush() {
-    raw_.Write(string_view(buf_, pos_ - buf_));
+    raw_.Write(string_view(buf_, static_cast<size_t>(pos_ - buf_)));
     pos_ = buf_;
   }
 
@@ -120,7 +121,9 @@
   }
 
  private:
-  size_t Avail() const { return buf_ + sizeof(buf_) - pos_; }
+  size_t Avail() const {
+    return static_cast<size_t>(buf_ + sizeof(buf_) - pos_);
+  }
 
   FormatRawSinkImpl raw_;
   size_t size_ = 0;
@@ -128,19 +131,33 @@
   char buf_[1024];
 };
 
-struct Flags {
-  bool basic : 1;     // fastest conversion: no flags, width, or precision
-  bool left : 1;      // "-"
-  bool show_pos : 1;  // "+"
-  bool sign_col : 1;  // " "
-  bool alt : 1;       // "#"
-  bool zero : 1;      // "0"
-  std::string ToString() const;
-  friend std::ostream& operator<<(std::ostream& os, const Flags& v) {
-    return os << v.ToString();
-  }
+enum class Flags : uint8_t {
+  kBasic = 0,
+  kLeft = 1 << 0,
+  kShowPos = 1 << 1,
+  kSignCol = 1 << 2,
+  kAlt = 1 << 3,
+  kZero = 1 << 4,
+  // This is not a real flag. It just exists to turn off kBasic when no other
+  // flags are set. This is for when width/precision are specified.
+  kNonBasic = 1 << 5,
 };
 
+constexpr Flags operator|(Flags a, Flags b) {
+  return static_cast<Flags>(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
+}
+
+constexpr bool FlagsContains(Flags haystack, Flags needle) {
+  return (static_cast<uint8_t>(haystack) & static_cast<uint8_t>(needle)) ==
+         static_cast<uint8_t>(needle);
+}
+
+std::string FlagsToString(Flags v);
+
+inline std::ostream& operator<<(std::ostream& os, Flags v) {
+  return os << FlagsToString(v);
+}
+
 // clang-format off
 #define ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(X_VAL, X_SEP) \
   /* text */ \
@@ -152,7 +169,7 @@
   X_VAL(f) X_SEP X_VAL(F) X_SEP X_VAL(e) X_SEP X_VAL(E) X_SEP \
   X_VAL(g) X_SEP X_VAL(G) X_SEP X_VAL(a) X_SEP X_VAL(A) X_SEP \
   /* misc */ \
-  X_VAL(n) X_SEP X_VAL(p)
+  X_VAL(n) X_SEP X_VAL(p) X_SEP X_VAL(v)
 // clang-format on
 
 // This type should not be referenced, it exists only to provide labels
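
The `v` conversion added above is threaded through the code via this X-macro, which expands the same character list into several declarations so they cannot drift apart. A small self-contained sketch of the technique, with hypothetical macro and enum names rather than the Abseil ones:

#include <cstdio>

// Hypothetical X-macro listing conversion characters (illustration only).
#define MY_CONVERSION_CHARS(X_VAL, X_SEP) \
  X_VAL(d) X_SEP X_VAL(s) X_SEP X_VAL(v)

// Expand once into an enum...
#define MY_ENUM_VAL(id) k_##id,
enum class ConvChar { MY_CONVERSION_CHARS(MY_ENUM_VAL, ) kNone };
#undef MY_ENUM_VAL

// ...and once into a name table, so both stay in sync automatically.
#define MY_NAME_VAL(id) #id,
static const char* const kConvNames[] = {MY_CONVERSION_CHARS(MY_NAME_VAL, )
                                         "none"};
#undef MY_NAME_VAL

int main() {
  // Prints "v": the enum index and the name table were generated together.
  std::printf("%s\n", kConvNames[static_cast<int>(ConvChar::k_v)]);
}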
@@ -174,7 +191,7 @@
     c, s,                    // text
     d, i, o, u, x, X,        // int
     f, F, e, E, g, G, a, A,  // float
-    n, p,                    // misc
+    n, p, v,                    // misc
     kNone
   };
   // clang-format on
@@ -256,13 +273,17 @@
 
 class FormatConversionSpecImpl {
  public:
-  // Width and precison are not specified, no flags are set.
-  bool is_basic() const { return flags_.basic; }
-  bool has_left_flag() const { return flags_.left; }
-  bool has_show_pos_flag() const { return flags_.show_pos; }
-  bool has_sign_col_flag() const { return flags_.sign_col; }
-  bool has_alt_flag() const { return flags_.alt; }
-  bool has_zero_flag() const { return flags_.zero; }
+  // Width and precision are not specified, no flags are set.
+  bool is_basic() const { return flags_ == Flags::kBasic; }
+  bool has_left_flag() const { return FlagsContains(flags_, Flags::kLeft); }
+  bool has_show_pos_flag() const {
+    return FlagsContains(flags_, Flags::kShowPos);
+  }
+  bool has_sign_col_flag() const {
+    return FlagsContains(flags_, Flags::kSignCol);
+  }
+  bool has_alt_flag() const { return FlagsContains(flags_, Flags::kAlt); }
+  bool has_zero_flag() const { return FlagsContains(flags_, Flags::kZero); }
 
   FormatConversionChar conversion_char() const {
     // Keep this field first in the struct. It generates better code when
@@ -271,6 +292,8 @@
     return conv_;
   }
 
+  void set_conversion_char(FormatConversionChar c) { conv_ = c; }
+
   // Returns the specified width. If width is unspecified, it returns a negative
   // value.
   int width() const { return width_; }
@@ -306,7 +329,7 @@
     conv->precision_ = p;
   }
   static std::string FlagsToString(const FormatConversionSpecImpl& spec) {
-    return spec.flags_.ToString();
+    return str_format_internal::FlagsToString(spec.flags_);
   }
 };
 
diff --git a/abseil-cpp/absl/strings/internal/str_format/extension_test.cc b/abseil-cpp/absl/strings/internal/str_format/extension_test.cc
index 1c93fdb..694c126 100644
--- a/abseil-cpp/absl/strings/internal/str_format/extension_test.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/extension_test.cc
@@ -19,6 +19,7 @@
 #include <random>
 #include <string>
 
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/strings/str_format.h"
 #include "absl/strings/string_view.h"
@@ -95,4 +96,14 @@
 #undef X_VAL
 }
 
+TEST(FormatExtensionTest, SetConversionChar) {
+  absl::str_format_internal::FormatConversionSpecImpl spec;
+  EXPECT_EQ(spec.conversion_char(),
+            absl::str_format_internal::FormatConversionCharInternal::kNone);
+  spec.set_conversion_char(
+      absl::str_format_internal::FormatConversionCharInternal::d);
+  EXPECT_EQ(spec.conversion_char(),
+            absl::str_format_internal::FormatConversionCharInternal::d);
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc b/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc
index 20aeada..8edf520 100644
--- a/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/internal/str_format/float_conversion.h"
 
 #include <string.h>
@@ -10,11 +24,12 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/bits.h"
 #include "absl/base/optimization.h"
 #include "absl/functional/function_ref.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
+#include "absl/numeric/internal/representation.h"
 #include "absl/strings/numbers.h"
 #include "absl/types/optional.h"
 #include "absl/types/span.h"
@@ -25,6 +40,8 @@
 
 namespace {
 
+using ::absl::numeric_internal::IsDoubleDouble;
+
 // The code below wants to avoid heap allocations.
 // To do so it needs to allocate memory on the stack.
 // `StackArray` will allocate memory on the stack in the form of a uint32_t
@@ -75,41 +92,47 @@
 
 // Calculates `10 * (*v) + carry` and stores the result in `*v` and returns
 // the carry.
+// Requires: `0 <= carry <= 9`
 template <typename Int>
-inline Int MultiplyBy10WithCarry(Int *v, Int carry) {
+inline char MultiplyBy10WithCarry(Int* v, char carry) {
   using BiggerInt = absl::conditional_t<sizeof(Int) == 4, uint64_t, uint128>;
-  BiggerInt tmp = 10 * static_cast<BiggerInt>(*v) + carry;
+  BiggerInt tmp =
+      10 * static_cast<BiggerInt>(*v) + static_cast<BiggerInt>(carry);
   *v = static_cast<Int>(tmp);
-  return static_cast<Int>(tmp >> (sizeof(Int) * 8));
+  return static_cast<char>(tmp >> (sizeof(Int) * 8));
 }
 
 // Calculates `(2^64 * carry + *v) / 10`.
 // Stores the quotient in `*v` and returns the remainder.
 // Requires: `0 <= carry <= 9`
-inline uint64_t DivideBy10WithCarry(uint64_t *v, uint64_t carry) {
+inline char DivideBy10WithCarry(uint64_t* v, char carry) {
   constexpr uint64_t divisor = 10;
   // 2^64 / divisor = chunk_quotient + chunk_remainder / divisor
   constexpr uint64_t chunk_quotient = (uint64_t{1} << 63) / (divisor / 2);
   constexpr uint64_t chunk_remainder = uint64_t{} - chunk_quotient * divisor;
 
+  const uint64_t carry_u64 = static_cast<uint64_t>(carry);
   const uint64_t mod = *v % divisor;
-  const uint64_t next_carry = chunk_remainder * carry + mod;
-  *v = *v / divisor + carry * chunk_quotient + next_carry / divisor;
-  return next_carry % divisor;
+  const uint64_t next_carry = chunk_remainder * carry_u64 + mod;
+  *v = *v / divisor + carry_u64 * chunk_quotient + next_carry / divisor;
+  return static_cast<char>(next_carry % divisor);
 }
 
+using MaxFloatType =
+    typename std::conditional<IsDoubleDouble(), double, long double>::type;
+
 // Generates the decimal representation for an integer of the form `v * 2^exp`,
 // where `v` and `exp` are both positive integers.
 // It generates the digits from the left (ie the most significant digit first)
 // to allow for direct printing into the sink.
 //
-// Requires `0 <= exp` and `exp <= numeric_limits<long double>::max_exponent`.
+// Requires `0 <= exp` and `exp <= numeric_limits<MaxFloatType>::max_exponent`.
 class BinaryToDecimal {
-  static constexpr int ChunksNeeded(int exp) {
+  static constexpr size_t ChunksNeeded(int exp) {
     // We will left shift a uint128 by `exp` bits, so we need `128+exp` total
     // bits. Round up to 32.
     // See constructor for details about adding `10%` to the value.
-    return (128 + exp + 31) / 32 * 11 / 10;
+    return static_cast<size_t>((128 + exp + 31) / 32 * 11 / 10);
   }
 
  public:
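
The two carry helpers changed earlier in this hunk walk multi-word integers one limb at a time, using a wider temporary so a limb's overflow becomes the carry for its neighbour. A standalone sketch of the multiply side, assuming little-endian 32-bit limbs — illustrative only, not the templated Abseil helpers:

#include <cstdint>
#include <cstdio>

// Multiply a little-endian array of 32-bit limbs by 10, returning the carry
// that overflows the most significant limb. The 64-bit temporary holds both
// the new limb value (low half) and the carry (high half).
uint32_t MulBy10(uint32_t* limbs, int n, uint32_t carry_in) {
  uint64_t carry = carry_in;
  for (int i = 0; i < n; ++i) {
    uint64_t tmp = 10 * static_cast<uint64_t>(limbs[i]) + carry;
    limbs[i] = static_cast<uint32_t>(tmp);  // low 32 bits stay in this limb
    carry = tmp >> 32;                      // high bits move up
  }
  return static_cast<uint32_t>(carry);
}

int main() {
  uint32_t v[1] = {0xFFFFFFFFu};  // 4294967295
  uint32_t carry = MulBy10(v, 1, 0);
  // 4294967295 * 10 == 42949672950 == 0x9'FFFFFFF6
  std::printf("carry=%u low=%08x\n", static_cast<unsigned>(carry),
              static_cast<unsigned>(v[0]));  // carry=9 low=fffffff6
}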
@@ -118,10 +141,10 @@
   static void RunConversion(uint128 v, int exp,
                             absl::FunctionRef<void(BinaryToDecimal)> f) {
     assert(exp > 0);
-    assert(exp <= std::numeric_limits<long double>::max_exponent);
+    assert(exp <= std::numeric_limits<MaxFloatType>::max_exponent);
     static_assert(
         StackArray::kMaxCapacity >=
-            ChunksNeeded(std::numeric_limits<long double>::max_exponent),
+            ChunksNeeded(std::numeric_limits<MaxFloatType>::max_exponent),
         "");
 
     StackArray::RunWithCapacity(
@@ -129,9 +152,9 @@
         [=](absl::Span<uint32_t> input) { f(BinaryToDecimal(input, v, exp)); });
   }
 
-  int TotalDigits() const {
-    return static_cast<int>((decimal_end_ - decimal_start_) * kDigitsPerChunk +
-                            CurrentDigits().size());
+  size_t TotalDigits() const {
+    return (decimal_end_ - decimal_start_) * kDigitsPerChunk +
+           CurrentDigits().size();
   }
 
   // See the current block of digits.
@@ -170,30 +193,31 @@
     // the decimal representation is around 7% less efficient in space than the
     // binary one. We allocate an extra 10% memory to account for this. See
     // ChunksNeeded for this calculation.
-    int chunk_index = exp / 32;
+    size_t after_chunk_index = static_cast<size_t>(exp / 32 + 1);
     decimal_start_ = decimal_end_ = ChunksNeeded(exp);
     const int offset = exp % 32;
     // Left shift v by exp bits.
-    data_[chunk_index] = static_cast<uint32_t>(v << offset);
+    data_[after_chunk_index - 1] = static_cast<uint32_t>(v << offset);
     for (v >>= (32 - offset); v; v >>= 32)
-      data_[++chunk_index] = static_cast<uint32_t>(v);
+      data_[++after_chunk_index - 1] = static_cast<uint32_t>(v);
 
-    while (chunk_index >= 0) {
+    while (after_chunk_index > 0) {
       // While we have more than one chunk available, go in steps of 1e9.
-      // `data_[chunk_index]` holds the highest non-zero binary chunk, so keep
-      // the variable updated.
+      // `data_[after_chunk_index - 1]` holds the highest non-zero binary chunk,
+      // so keep the variable updated.
       uint32_t carry = 0;
-      for (int i = chunk_index; i >= 0; --i) {
-        uint64_t tmp = uint64_t{data_[i]} + (uint64_t{carry} << 32);
-        data_[i] = static_cast<uint32_t>(tmp / uint64_t{1000000000});
+      for (size_t i = after_chunk_index; i > 0; --i) {
+        uint64_t tmp = uint64_t{data_[i - 1]} + (uint64_t{carry} << 32);
+        data_[i - 1] = static_cast<uint32_t>(tmp / uint64_t{1000000000});
         carry = static_cast<uint32_t>(tmp % uint64_t{1000000000});
       }
 
       // If the highest chunk is now empty, remove it from view.
-      if (data_[chunk_index] == 0) --chunk_index;
+      if (data_[after_chunk_index - 1] == 0)
+        --after_chunk_index;
 
       --decimal_start_;
-      assert(decimal_start_ != chunk_index);
+      assert(decimal_start_ != after_chunk_index - 1);
       data_[decimal_start_] = carry;
     }
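
The constructor above turns the shifted mantissa into base-10^9 chunks by repeatedly long-dividing the base-2^32 limbs by 1e9 and collecting each remainder. A minimal sketch of one such division pass over big-endian limbs — a hypothetical helper, not the class above:

#include <cstdint>
#include <cstdio>
#include <vector>

// One long-division pass: divide the whole multi-limb value by 1e9. The most
// significant limb goes first; its remainder becomes the high word for the
// next limb. The returned remainder is the lowest 9 decimal digits.
uint32_t DivideLimbsBy1e9(std::vector<uint32_t>& limbs /* big-endian */) {
  uint32_t remainder = 0;
  for (uint32_t& limb : limbs) {
    uint64_t tmp = (static_cast<uint64_t>(remainder) << 32) | limb;
    limb = static_cast<uint32_t>(tmp / 1000000000u);
    remainder = static_cast<uint32_t>(tmp % 1000000000u);
  }
  return remainder;
}

int main() {
  // 10'000'000'000 == 0x2'540BE400, stored as big-endian limbs {2, 0x540BE400}.
  std::vector<uint32_t> limbs = {2u, 0x540BE400u};
  uint32_t low9 = DivideLimbsBy1e9(limbs);
  std::printf("low9=%09u quotient_low=%u\n", static_cast<unsigned>(low9),
              static_cast<unsigned>(limbs[1]));  // low9=000000000 quotient_low=10
}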
 
@@ -205,51 +229,52 @@
   }
 
  private:
-  static constexpr int kDigitsPerChunk = 9;
+  static constexpr size_t kDigitsPerChunk = 9;
 
-  int decimal_start_;
-  int decimal_end_;
+  size_t decimal_start_;
+  size_t decimal_end_;
 
   char digits_[kDigitsPerChunk];
-  int size_ = 0;
+  size_t size_ = 0;
 
   absl::Span<uint32_t> data_;
 };
 
 // Converts a value of the form `x * 2^-exp` into a sequence of decimal digits.
 // Requires `-exp < 0` and
-// `-exp >= limits<long double>::min_exponent - limits<long double>::digits`.
+// `-exp >= limits<MaxFloatType>::min_exponent - limits<MaxFloatType>::digits`.
 class FractionalDigitGenerator {
  public:
   // Run the conversion for `v * 2^exp` and call `f(generator)`.
   // This function will allocate enough stack space to perform the conversion.
   static void RunConversion(
       uint128 v, int exp, absl::FunctionRef<void(FractionalDigitGenerator)> f) {
-    using Limits = std::numeric_limits<long double>;
+    using Limits = std::numeric_limits<MaxFloatType>;
     assert(-exp < 0);
     assert(-exp >= Limits::min_exponent - 128);
     static_assert(StackArray::kMaxCapacity >=
                       (Limits::digits + 128 - Limits::min_exponent + 31) / 32,
                   "");
-    StackArray::RunWithCapacity((Limits::digits + exp + 31) / 32,
-                                [=](absl::Span<uint32_t> input) {
-                                  f(FractionalDigitGenerator(input, v, exp));
-                                });
+    StackArray::RunWithCapacity(
+        static_cast<size_t>((Limits::digits + exp + 31) / 32),
+        [=](absl::Span<uint32_t> input) {
+          f(FractionalDigitGenerator(input, v, exp));
+        });
   }
 
   // Returns true if there are any more non-zero digits left.
-  bool HasMoreDigits() const { return next_digit_ != 0 || chunk_index_ >= 0; }
+  bool HasMoreDigits() const { return next_digit_ != 0 || after_chunk_index_; }
 
   // Returns true if the remainder digits are greater than 5000...
   bool IsGreaterThanHalf() const {
-    return next_digit_ > 5 || (next_digit_ == 5 && chunk_index_ >= 0);
+    return next_digit_ > 5 || (next_digit_ == 5 && after_chunk_index_);
   }
   // Returns true if the remainder digits are exactly 5000...
-  bool IsExactlyHalf() const { return next_digit_ == 5 && chunk_index_ < 0; }
+  bool IsExactlyHalf() const { return next_digit_ == 5 && !after_chunk_index_; }
 
   struct Digits {
-    int digit_before_nine;
-    int num_nines;
+    char digit_before_nine;
+    size_t num_nines;
   };
 
   // Get the next set of digits.
@@ -268,45 +293,46 @@
 
  private:
   // Return the next digit.
-  int GetOneDigit() {
-    if (chunk_index_ < 0) return 0;
+  char GetOneDigit() {
+    if (!after_chunk_index_)
+      return 0;
 
-    uint32_t carry = 0;
-    for (int i = chunk_index_; i >= 0; --i) {
-      carry = MultiplyBy10WithCarry(&data_[i], carry);
+    char carry = 0;
+    for (size_t i = after_chunk_index_; i > 0; --i) {
+      carry = MultiplyBy10WithCarry(&data_[i - 1], carry);
     }
     // If the lowest chunk is now empty, remove it from view.
-    if (data_[chunk_index_] == 0) --chunk_index_;
+    if (data_[after_chunk_index_ - 1] == 0)
+      --after_chunk_index_;
     return carry;
   }
 
   FractionalDigitGenerator(absl::Span<uint32_t> data, uint128 v, int exp)
-      : chunk_index_(exp / 32), data_(data) {
+      : after_chunk_index_(static_cast<size_t>(exp / 32 + 1)), data_(data) {
     const int offset = exp % 32;
     // Right shift `v` by `exp` bits.
-    data_[chunk_index_] = static_cast<uint32_t>(v << (32 - offset));
+    data_[after_chunk_index_ - 1] = static_cast<uint32_t>(v << (32 - offset));
     v >>= offset;
     // Make sure we don't overflow the data. We already calculated that
     // non-zero bits fit, so we might not have space for leading zero bits.
-    for (int pos = chunk_index_; v; v >>= 32)
+    for (size_t pos = after_chunk_index_ - 1; v; v >>= 32)
       data_[--pos] = static_cast<uint32_t>(v);
 
     // Fill next_digit_, as GetDigits expects it to be populated always.
     next_digit_ = GetOneDigit();
   }
 
-  int next_digit_;
-  int chunk_index_;
+  char next_digit_;
+  size_t after_chunk_index_;
   absl::Span<uint32_t> data_;
 };
 
 // Count the number of leading zero bits.
-int LeadingZeros(uint64_t v) { return base_internal::CountLeadingZeros64(v); }
+int LeadingZeros(uint64_t v) { return countl_zero(v); }
 int LeadingZeros(uint128 v) {
   auto high = static_cast<uint64_t>(v >> 64);
   auto low = static_cast<uint64_t>(v);
-  return high != 0 ? base_internal::CountLeadingZeros64(high)
-                   : 64 + base_internal::CountLeadingZeros64(low);
+  return high != 0 ? countl_zero(high) : 64 + countl_zero(low);
 }
 
 // Round up the text digits starting at `p`.
@@ -343,7 +369,7 @@
   auto low = static_cast<uint64_t>(v);
 
   while (high != 0) {
-    uint64_t carry = DivideBy10WithCarry(&high, 0);
+    char carry = DivideBy10WithCarry(&high, 0);
     carry = DivideBy10WithCarry(&low, carry);
     *--p = carry + '0';
   }
@@ -354,13 +380,15 @@
 // shifting.
 // Performs rounding if necessary to fit within `precision`.
 // Returns the pointer to one after the last character written.
-char *PrintFractionalDigitsFast(uint64_t v, char *start, int exp,
-                                int precision) {
+char* PrintFractionalDigitsFast(uint64_t v,
+                                char* start,
+                                int exp,
+                                size_t precision) {
   char *p = start;
   v <<= (64 - exp);
   while (precision > 0) {
     if (!v) return p;
-    *p++ = MultiplyBy10WithCarry(&v, uint64_t{0}) + '0';
+    *p++ = MultiplyBy10WithCarry(&v, 0) + '0';
     --precision;
   }
 
@@ -374,8 +402,6 @@
     RoundToEven(p - 1);
   }
 
-  assert(precision == 0);
-  // Precision can only be zero here.
   return p;
 }
 
@@ -383,8 +409,10 @@
 // after shifting.
 // Performs rounding if necessary to fit within `precision`.
 // Returns the pointer to one after the last character written.
-char *PrintFractionalDigitsFast(uint128 v, char *start, int exp,
-                                int precision) {
+char* PrintFractionalDigitsFast(uint128 v,
+                                char* start,
+                                int exp,
+                                size_t precision) {
   char *p = start;
   v <<= (128 - exp);
   auto high = static_cast<uint64_t>(v >> 64);
@@ -393,7 +421,7 @@
   // While we have digits to print and `low` is not empty, do the long
   // multiplication.
   while (precision > 0 && low != 0) {
-    uint64_t carry = MultiplyBy10WithCarry(&low, uint64_t{0});
+    char carry = MultiplyBy10WithCarry(&low, 0);
     carry = MultiplyBy10WithCarry(&high, carry);
 
     *p++ = carry + '0';
@@ -405,7 +433,7 @@
   // above.
   while (precision > 0) {
     if (!high) return p;
-    *p++ = MultiplyBy10WithCarry(&high, uint64_t{0}) + '0';
+    *p++ = MultiplyBy10WithCarry(&high, 0) + '0';
     --precision;
   }
 
@@ -419,14 +447,12 @@
     RoundToEven(p - 1);
   }
 
-  assert(precision == 0);
-  // Precision can only be zero here.
   return p;
 }
 
 struct FormatState {
   char sign_char;
-  int precision;
+  size_t precision;
   const FormatConversionSpecImpl &conv;
   FormatSinkImpl *sink;
 
@@ -436,9 +462,9 @@
 };
 
 struct Padding {
-  int left_spaces;
-  int zeros;
-  int right_spaces;
+  size_t left_spaces;
+  size_t zeros;
+  size_t right_spaces;
 };
 
 Padding ExtraWidthToPadding(size_t total_size, const FormatState &state) {
@@ -446,7 +472,7 @@
       static_cast<size_t>(state.conv.width()) <= total_size) {
     return {0, 0, 0};
   }
-  int missing_chars = state.conv.width() - total_size;
+  size_t missing_chars = static_cast<size_t>(state.conv.width()) - total_size;
   if (state.conv.has_left_flag()) {
     return {0, 0, missing_chars};
   } else if (state.conv.has_zero_flag()) {
@@ -456,8 +482,10 @@
   }
 }
 
-void FinalPrint(const FormatState &state, absl::string_view data,
-                int padding_offset, int trailing_zeros,
+void FinalPrint(const FormatState& state,
+                absl::string_view data,
+                size_t padding_offset,
+                size_t trailing_zeros,
                 absl::string_view data_postfix) {
   if (state.conv.width() < 0) {
     // No width specified. Fast-path.
@@ -468,10 +496,10 @@
     return;
   }
 
-  auto padding = ExtraWidthToPadding((state.sign_char != '\0' ? 1 : 0) +
-                                         data.size() + data_postfix.size() +
-                                         static_cast<size_t>(trailing_zeros),
-                                     state);
+  auto padding =
+      ExtraWidthToPadding((state.sign_char != '\0' ? 1 : 0) + data.size() +
+                              data_postfix.size() + trailing_zeros,
+                          state);
 
   state.sink->Append(padding.left_spaces, ' ');
   if (state.sign_char != '\0') state.sink->Append(1, state.sign_char);
@@ -528,15 +556,16 @@
     if (integral_digits_start[-1] != '0') --integral_digits_start;
   }
 
-  size_t size = fractional_digits_end - integral_digits_start;
+  size_t size =
+      static_cast<size_t>(fractional_digits_end - integral_digits_start);
 
   // In `alt` mode (flag #) we keep the `.` even if there are no fractional
   // digits. In non-alt mode, we strip it.
   if (!state.ShouldPrintDot()) --size;
   FinalPrint(state, absl::string_view(integral_digits_start, size),
              /*padding_offset=*/0,
-             static_cast<int>(state.precision - (fractional_digits_end -
-                                                 fractional_digits_start)),
+             state.precision - static_cast<size_t>(fractional_digits_end -
+                                                   fractional_digits_start),
              /*data_postfix=*/"");
 }
 
@@ -548,21 +577,22 @@
 void FormatFPositiveExpSlow(uint128 v, int exp, const FormatState &state) {
   BinaryToDecimal::RunConversion(v, exp, [&](BinaryToDecimal btd) {
     const size_t total_digits =
-        btd.TotalDigits() +
-        (state.ShouldPrintDot() ? static_cast<size_t>(state.precision) + 1 : 0);
+        btd.TotalDigits() + (state.ShouldPrintDot() ? state.precision + 1 : 0);
 
     const auto padding = ExtraWidthToPadding(
         total_digits + (state.sign_char != '\0' ? 1 : 0), state);
 
     state.sink->Append(padding.left_spaces, ' ');
-    if (state.sign_char != '\0') state.sink->Append(1, state.sign_char);
+    if (state.sign_char != '\0')
+      state.sink->Append(1, state.sign_char);
     state.sink->Append(padding.zeros, '0');
 
     do {
       state.sink->Append(btd.CurrentDigits());
     } while (btd.AdvanceDigits());
 
-    if (state.ShouldPrintDot()) state.sink->Append(1, '.');
+    if (state.ShouldPrintDot())
+      state.sink->Append(1, '.');
     state.sink->Append(state.precision, '0');
     state.sink->Append(padding.right_spaces, ' ');
   });
@@ -575,8 +605,7 @@
 // digits.
 void FormatFNegativeExpSlow(uint128 v, int exp, const FormatState &state) {
   const size_t total_digits =
-      /* 0 */ 1 +
-      (state.ShouldPrintDot() ? static_cast<size_t>(state.precision) + 1 : 0);
+      /* 0 */ 1 + (state.ShouldPrintDot() ? state.precision + 1 : 0);
   auto padding =
       ExtraWidthToPadding(total_digits + (state.sign_char ? 1 : 0), state);
   padding.zeros += 1;
@@ -587,7 +616,7 @@
   if (state.ShouldPrintDot()) state.sink->Append(1, '.');
 
   // Print digits
-  int digits_to_go = state.precision;
+  size_t digits_to_go = state.precision;
 
   FractionalDigitGenerator::RunConversion(
       v, exp, [&](FractionalDigitGenerator digit_gen) {
@@ -647,7 +676,8 @@
 template <typename Int>
 void FormatF(Int mantissa, int exp, const FormatState &state) {
   if (exp >= 0) {
-    const int total_bits = sizeof(Int) * 8 - LeadingZeros(mantissa) + exp;
+    const int total_bits =
+        static_cast<int>(sizeof(Int) * 8) - LeadingZeros(mantissa) + exp;
 
     // Fallback to the slow stack-based approach if we can't do it in a 64 or
     // 128 bit state.
@@ -667,9 +697,9 @@
 // Grab the group of four bits (nibble) from `n`. E.g., nibble 1 corresponds to
 // bits 4-7.
 template <typename Int>
-uint8_t GetNibble(Int n, int nibble_index) {
+uint8_t GetNibble(Int n, size_t nibble_index) {
   constexpr Int mask_low_nibble = Int{0xf};
-  int shift = nibble_index * 4;
+  int shift = static_cast<int>(nibble_index * 4);
   n &= mask_low_nibble << shift;
   return static_cast<uint8_t>((n >> shift) & 0xf);
 }
@@ -677,38 +707,42 @@
 // Add one to the given nibble, applying carry to higher nibbles. Returns true
 // if overflow, false otherwise.
 template <typename Int>
-bool IncrementNibble(int nibble_index, Int *n) {
-  constexpr int kShift = sizeof(Int) * 8 - 1;
-  constexpr int kNumNibbles = sizeof(Int) * 8 / 4;
+bool IncrementNibble(size_t nibble_index, Int* n) {
+  constexpr size_t kShift = sizeof(Int) * 8 - 1;
+  constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4;
   Int before = *n >> kShift;
-  // Here we essentially want to take the number 1 and move it into the requsted
-  // nibble, then add it to *n to effectively increment the nibble. However,
-  // ASan will complain if we try to shift the 1 beyond the limits of the Int,
-  // i.e., if the nibble_index is out of range. So therefore we check for this
-  // and if we are out of range we just add 0 which leaves *n unchanged, which
-  // seems like the reasonable thing to do in that case.
-  *n += ((nibble_index >= kNumNibbles) ? 0 : (Int{1} << (nibble_index * 4)));
+  // Here we essentially want to take the number 1 and move it into the
+  // requested nibble, then add it to *n to effectively increment the nibble.
+  // However, ASan will complain if we try to shift the 1 beyond the limits of
+  // the Int, i.e., if the nibble_index is out of range. So therefore we check
+  // for this and if we are out of range we just add 0 which leaves *n
+  // unchanged, which seems like the reasonable thing to do in that case.
+  *n += ((nibble_index >= kNumNibbles)
+             ? 0
+             : (Int{1} << static_cast<int>(nibble_index * 4)));
   Int after = *n >> kShift;
   return (before && !after) || (nibble_index >= kNumNibbles);
 }
 
 // Return a mask with 1's in the given nibble and all lower nibbles.
 template <typename Int>
-Int MaskUpToNibbleInclusive(int nibble_index) {
-  constexpr int kNumNibbles = sizeof(Int) * 8 / 4;
+Int MaskUpToNibbleInclusive(size_t nibble_index) {
+  constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4;
   static const Int ones = ~Int{0};
-  return ones >> std::max(0, 4 * (kNumNibbles - nibble_index - 1));
+  ++nibble_index;
+  return ones >> static_cast<int>(
+                     4 * (std::max(kNumNibbles, nibble_index) - nibble_index));
 }
 
 // Return a mask with 1's below the given nibble.
 template <typename Int>
-Int MaskUpToNibbleExclusive(int nibble_index) {
-  return nibble_index <= 0 ? 0 : MaskUpToNibbleInclusive<Int>(nibble_index - 1);
+Int MaskUpToNibbleExclusive(size_t nibble_index) {
+  return nibble_index == 0 ? 0 : MaskUpToNibbleInclusive<Int>(nibble_index - 1);
 }
 
 template <typename Int>
-Int MoveToNibble(uint8_t nibble, int nibble_index) {
-  return Int{nibble} << (4 * nibble_index);
+Int MoveToNibble(uint8_t nibble, size_t nibble_index) {
+  return Int{nibble} << static_cast<int>(4 * nibble_index);
 }
 
 // Given mantissa size, find optimal # of mantissa bits to put in initial digit.
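
The hex-float path works on the mantissa one nibble (4 bits) at a time, and the helpers above were reworked to take size_t indices without ever shifting by a negative amount. A standalone sketch of nibble extraction and inclusive masking on a plain uint64_t, illustrative only:

#include <cstdint>
#include <cstdio>

// Nibble 0 is bits 0-3, nibble 1 is bits 4-7, and so on up to nibble 15.
uint8_t GetNibble(uint64_t n, size_t idx) {
  return static_cast<uint8_t>((n >> (4 * idx)) & 0xF);
}

// Mask covering the given nibble and everything below it. Counting the shift
// from the top keeps the amount non-negative for every valid index.
uint64_t MaskUpToNibbleInclusive(size_t idx) {
  constexpr size_t kNumNibbles = 64 / 4;
  ++idx;
  return ~uint64_t{0} >> (4 * (kNumNibbles > idx ? kNumNibbles - idx : 0));
}

int main() {
  uint64_t mantissa = 0x1ab1de3000000000u;
  std::printf("%x\n", GetNibble(mantissa, 15));  // 1 (leading digit)
  std::printf("%x\n", GetNibble(mantissa, 14));  // a
  std::printf("%016llx\n",
              static_cast<unsigned long long>(
                  mantissa & MaskUpToNibbleInclusive(13)));
  // keeps nibbles 0-13: 00b1de3000000000
}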
@@ -725,10 +759,10 @@
 // a multiple of four. Once again, the goal is to have all fractional digits
 // represent real precision.
 template <typename Float>
-constexpr int HexFloatLeadingDigitSizeInBits() {
+constexpr size_t HexFloatLeadingDigitSizeInBits() {
   return std::numeric_limits<Float>::digits % 4 > 0
-             ? std::numeric_limits<Float>::digits % 4
-             : 4;
+             ? static_cast<size_t>(std::numeric_limits<Float>::digits % 4)
+             : size_t{4};
 }
 
 // This function captures the rounding behavior of glibc for hex float
@@ -738,16 +772,17 @@
 // point that is not followed by 800000..., it disregards the parity and rounds
 // up if > 8 and rounds down if < 8.
 template <typename Int>
-bool HexFloatNeedsRoundUp(Int mantissa, int final_nibble_displayed,
+bool HexFloatNeedsRoundUp(Int mantissa,
+                          size_t final_nibble_displayed,
                           uint8_t leading) {
   // If the last nibble (hex digit) to be displayed is the lowest one in the
   // mantissa then that means that we don't have any further nibbles to inform
   // rounding, so don't round.
-  if (final_nibble_displayed <= 0) {
+  if (final_nibble_displayed == 0) {
     return false;
   }
-  int rounding_nibble_idx = final_nibble_displayed - 1;
-  constexpr int kTotalNibbles = sizeof(Int) * 8 / 4;
+  size_t rounding_nibble_idx = final_nibble_displayed - 1;
+  constexpr size_t kTotalNibbles = sizeof(Int) * 8 / 4;
   assert(final_nibble_displayed <= kTotalNibbles);
   Int mantissa_up_to_rounding_nibble_inclusive =
       mantissa & MaskUpToNibbleInclusive<Int>(rounding_nibble_idx);
@@ -774,7 +809,7 @@
   }
 
   int min_exponent;
-  int leading_digit_size_bits;
+  size_t leading_digit_size_bits;
 };
 
 // Hex Float Rounding. First check if we need to round; if so, then we do that
@@ -784,10 +819,12 @@
 template <typename Int>
 void FormatARound(bool precision_specified, const FormatState &state,
                   uint8_t *leading, Int *mantissa, int *exp) {
-  constexpr int kTotalNibbles = sizeof(Int) * 8 / 4;
+  constexpr size_t kTotalNibbles = sizeof(Int) * 8 / 4;
   // Index of the last nibble that we could display given precision.
-  int final_nibble_displayed =
-      precision_specified ? std::max(0, (kTotalNibbles - state.precision)) : 0;
+  size_t final_nibble_displayed =
+      precision_specified
+          ? (std::max(kTotalNibbles, state.precision) - state.precision)
+          : 0;
   if (HexFloatNeedsRoundUp(*mantissa, final_nibble_displayed, *leading)) {
     // Need to round up.
     bool overflow = IncrementNibble(final_nibble_displayed, mantissa);
@@ -811,9 +848,9 @@
 template <typename Int>
 void FormatANormalize(const HexFloatTypeParams float_traits, uint8_t *leading,
                       Int *mantissa, int *exp) {
-  constexpr int kIntBits = sizeof(Int) * 8;
+  constexpr size_t kIntBits = sizeof(Int) * 8;
   static const Int kHighIntBit = Int{1} << (kIntBits - 1);
-  const int kLeadDigitBitsCount = float_traits.leading_digit_size_bits;
+  const size_t kLeadDigitBitsCount = float_traits.leading_digit_size_bits;
   // Normalize mantissa so that highest bit set is in MSB position, unless we
   // get interrupted by the exponent threshold.
   while (*mantissa && !(*mantissa & kHighIntBit)) {
@@ -827,18 +864,18 @@
   }
   // Extract bits for leading digit then shift them away leaving the
   // fractional part.
-  *leading =
-      static_cast<uint8_t>(*mantissa >> (kIntBits - kLeadDigitBitsCount));
-  *exp -= (*mantissa != 0) ? kLeadDigitBitsCount : *exp;
-  *mantissa <<= kLeadDigitBitsCount;
+  *leading = static_cast<uint8_t>(
+      *mantissa >> static_cast<int>(kIntBits - kLeadDigitBitsCount));
+  *exp -= (*mantissa != 0) ? static_cast<int>(kLeadDigitBitsCount) : *exp;
+  *mantissa <<= static_cast<int>(kLeadDigitBitsCount);
 }
 
 template <typename Int>
 void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp,
              bool uppercase, const FormatState &state) {
   // Int properties.
-  constexpr int kIntBits = sizeof(Int) * 8;
-  constexpr int kTotalNibbles = sizeof(Int) * 8 / 4;
+  constexpr size_t kIntBits = sizeof(Int) * 8;
+  constexpr size_t kTotalNibbles = sizeof(Int) * 8 / 4;
   // Did the user specify a precision explicitly?
   const bool precision_specified = state.conv.precision() >= 0;
 
@@ -858,10 +895,10 @@
   // This buffer holds the "0x1.ab1de3" portion of "0x1.ab1de3pe+2". Compute the
   // size with long double which is the largest of the floats.
   constexpr size_t kBufSizeForHexFloatRepr =
-      2                                               // 0x
-      + std::numeric_limits<long double>::digits / 4  // number of hex digits
-      + 1                                             // round up
-      + 1;                                            // "." (dot)
+      2                                                // 0x
+      + std::numeric_limits<MaxFloatType>::digits / 4  // number of hex digits
+      + 1                                              // round up
+      + 1;                                             // "." (dot)
   char digits_buffer[kBufSizeForHexFloatRepr];
   char *digits_iter = digits_buffer;
   const char *const digits =
@@ -884,20 +921,23 @@
   }
 
   // ============ Fractional Digits ============
-  int digits_emitted = 0;
+  size_t digits_emitted = 0;
   while (mantissa > 0) {
     *digits_iter++ = digits[GetNibble(mantissa, kTotalNibbles - 1)];
     mantissa <<= 4;
     ++digits_emitted;
   }
-  int trailing_zeros =
-      precision_specified ? state.precision - digits_emitted : 0;
-  assert(trailing_zeros >= 0);
-  auto digits_result = string_view(digits_buffer, digits_iter - digits_buffer);
+  size_t trailing_zeros = 0;
+  if (precision_specified) {
+    assert(state.precision >= digits_emitted);
+    trailing_zeros = state.precision - digits_emitted;
+  }
+  auto digits_result = string_view(
+      digits_buffer, static_cast<size_t>(digits_iter - digits_buffer));
 
   // =============== Exponent ==================
   constexpr size_t kBufSizeForExpDecRepr =
-      numbers_internal::kFastToBufferSize  // requred for FastIntToBuffer
+      numbers_internal::kFastToBufferSize  // required for FastIntToBuffer
       + 1                                  // 'p' or 'P'
       + 1;                                 // '+' or '-'
   char exp_buffer[kBufSizeForExpDecRepr];
@@ -906,11 +946,11 @@
   numbers_internal::FastIntToBuffer(exp < 0 ? -exp : exp, exp_buffer + 2);
 
   // ============ Assemble Result ==============
-  FinalPrint(state,           //
-             digits_result,   // 0xN.NNN...
-             2,               // offset in `data` to start padding if needed.
-             trailing_zeros,  // num remaining mantissa padding zeros
-             exp_buffer);     // exponent
+  FinalPrint(state,
+             digits_result,                        // 0xN.NNN...
+             2,                                    // offset of any padding
+             static_cast<size_t>(trailing_zeros),  // remaining mantissa padding
+             exp_buffer);                          // exponent
 }
 
 char *CopyStringTo(absl::string_view v, char *out) {
@@ -942,10 +982,10 @@
     int n = snprintf(&space[0], space.size(), fmt, w, p, v);
     if (n < 0) return false;
     if (static_cast<size_t>(n) < space.size()) {
-      result = absl::string_view(space.data(), n);
+      result = absl::string_view(space.data(), static_cast<size_t>(n));
       break;
     }
-    space.resize(n + 1);
+    space.resize(static_cast<size_t>(n) + 1);
   }
   sink->Append(result);
   return true;
@@ -953,13 +993,13 @@
 
 // 128-bits in decimal: ceil(128*log(2)/log(10))
 //   or std::numeric_limits<__uint128_t>::digits10
-constexpr int kMaxFixedPrecision = 39;
+constexpr size_t kMaxFixedPrecision = 39;
 
-constexpr int kBufferLength = /*sign*/ 1 +
-                              /*integer*/ kMaxFixedPrecision +
-                              /*point*/ 1 +
-                              /*fraction*/ kMaxFixedPrecision +
-                              /*exponent e+123*/ 5;
+constexpr size_t kBufferLength = /*sign*/ 1 +
+                                 /*integer*/ kMaxFixedPrecision +
+                                 /*point*/ 1 +
+                                 /*fraction*/ kMaxFixedPrecision +
+                                 /*exponent e+123*/ 5;
 
 struct Buffer {
   void push_front(char c) {
@@ -975,14 +1015,14 @@
     --end;
   }
 
-  char &back() {
+  char &back() const {
     assert(begin < end);
     return end[-1];
   }
 
   char last_digit() const { return end[-1] == '.' ? end[-2] : end[-1]; }
 
-  int size() const { return static_cast<int>(end - begin); }
+  size_t size() const { return static_cast<size_t>(end - begin); }
 
   char data[kBufferLength];
   char *begin;
@@ -1011,8 +1051,9 @@
     return false;
   }
 
-  return sink->PutPaddedString(string_view(text, ptr - text), conv.width(), -1,
-                               conv.has_left_flag());
+  return sink->PutPaddedString(
+      string_view(text, static_cast<size_t>(ptr - text)), conv.width(), -1,
+      conv.has_left_flag());
 }
 
 // Round up the last digit of the value.
@@ -1049,19 +1090,19 @@
   }
   // Exponent digits.
   if (exp > 99) {
-    out->push_back(exp / 100 + '0');
-    out->push_back(exp / 10 % 10 + '0');
-    out->push_back(exp % 10 + '0');
+    out->push_back(static_cast<char>(exp / 100 + '0'));
+    out->push_back(static_cast<char>(exp / 10 % 10 + '0'));
+    out->push_back(static_cast<char>(exp % 10 + '0'));
   } else {
-    out->push_back(exp / 10 + '0');
-    out->push_back(exp % 10 + '0');
+    out->push_back(static_cast<char>(exp / 10 + '0'));
+    out->push_back(static_cast<char>(exp % 10 + '0'));
   }
 }
 
 template <typename Float, typename Int>
 constexpr bool CanFitMantissa() {
   return
-#if defined(__clang__) && !defined(__SSE3__)
+#if defined(__clang__) && (__clang_major__ < 9) && !defined(__SSE3__)
       // Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
       // Casting from long double to uint64_t is miscompiled and drops bits.
       (!std::is_same<Float, long double>::value ||
@@ -1096,8 +1137,8 @@
 // In Fixed mode, we add a '.' at the end.
 // In Precision mode, we add a '.' after the first digit.
 template <FormatStyle mode, typename Int>
-int PrintIntegralDigits(Int digits, Buffer *out) {
-  int printed = 0;
+size_t PrintIntegralDigits(Int digits, Buffer* out) {
+  size_t printed = 0;
   if (digits) {
     for (; digits; digits /= 10) out->push_front(digits % 10 + '0');
     printed = out->size();
@@ -1116,10 +1157,10 @@
 }
 
 // Back out 'extra_digits' digits and round up if necessary.
-bool RemoveExtraPrecision(int extra_digits, bool has_leftover_value,
-                          Buffer *out, int *exp_out) {
-  if (extra_digits <= 0) return false;
-
+void RemoveExtraPrecision(size_t extra_digits,
+                          bool has_leftover_value,
+                          Buffer* out,
+                          int* exp_out) {
   // Back out the extra digits
   out->end -= extra_digits;
 
@@ -1139,15 +1180,17 @@
   if (needs_to_round_up) {
     RoundUp<FormatStyle::Precision>(out, exp_out);
   }
-  return true;
 }
 
 // Print the value into the buffer.
 // This will not include the exponent, which will be returned in 'exp_out' for
 // Precision mode.
 template <typename Int, typename Float, FormatStyle mode>
-bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out,
-                       int *exp_out) {
+bool FloatToBufferImpl(Int int_mantissa,
+                       int exp,
+                       size_t precision,
+                       Buffer* out,
+                       int* exp_out) {
   assert((CanFitMantissa<Float, Int>()));
 
   const int int_bits = std::numeric_limits<Int>::digits;
@@ -1163,14 +1206,16 @@
       // The value will overflow the Int
       return false;
     }
-    int digits_printed = PrintIntegralDigits<mode>(int_mantissa << exp, out);
-    int digits_to_zero_pad = precision;
+    size_t digits_printed = PrintIntegralDigits<mode>(int_mantissa << exp, out);
+    size_t digits_to_zero_pad = precision;
     if (mode == FormatStyle::Precision) {
-      *exp_out = digits_printed - 1;
-      digits_to_zero_pad -= digits_printed - 1;
-      if (RemoveExtraPrecision(-digits_to_zero_pad, false, out, exp_out)) {
+      *exp_out = static_cast<int>(digits_printed - 1);
+      if (digits_to_zero_pad < digits_printed - 1) {
+        RemoveExtraPrecision(digits_printed - 1 - digits_to_zero_pad, false,
+                             out, exp_out);
         return true;
       }
+      digits_to_zero_pad -= digits_printed - 1;
     }
     for (; digits_to_zero_pad-- > 0;) out->push_back('0');
     return true;
@@ -1184,10 +1229,10 @@
   const Int mask = (Int{1} << exp) - 1;
 
   // Print the integral part first.
-  int digits_printed = PrintIntegralDigits<mode>(int_mantissa >> exp, out);
+  size_t digits_printed = PrintIntegralDigits<mode>(int_mantissa >> exp, out);
   int_mantissa &= mask;
 
-  int fractional_count = precision;
+  size_t fractional_count = precision;
   if (mode == FormatStyle::Precision) {
     if (digits_printed == 0) {
       // Find the first non-zero digit, when in Precision mode.
@@ -1203,20 +1248,21 @@
       int_mantissa &= mask;
     } else {
       // We already have a digit, and a '.'
-      *exp_out = digits_printed - 1;
-      fractional_count -= *exp_out;
-      if (RemoveExtraPrecision(-fractional_count, int_mantissa != 0, out,
-                               exp_out)) {
+      *exp_out = static_cast<int>(digits_printed - 1);
+      if (fractional_count < digits_printed - 1) {
         // If we had enough digits, return right away.
         // The code below will try to round again otherwise.
+        RemoveExtraPrecision(digits_printed - 1 - fractional_count,
+                             int_mantissa != 0, out, exp_out);
         return true;
       }
+      fractional_count -= digits_printed - 1;
     }
   }
 
   auto get_next_digit = [&] {
     int_mantissa *= 10;
-    int digit = static_cast<int>(int_mantissa >> exp);
+    char digit = static_cast<char>(int_mantissa >> exp);
     int_mantissa &= mask;
     return digit;
   };
@@ -1226,7 +1272,7 @@
     out->push_back(get_next_digit() + '0');
   }
 
-  int next_digit = get_next_digit();
+  char next_digit = get_next_digit();
   if (next_digit > 5 ||
       (next_digit == 5 && (int_mantissa || out->last_digit() % 2 == 1))) {
     RoundUp<mode>(out, exp_out);
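
The check above applies round-half-to-even on the first digit that will not be printed: anything above 5 rounds up, and exactly 5 rounds up only if more nonzero digits follow or the last printed digit is odd. A small arithmetic sketch of that decision, using a hypothetical helper:

#include <cstdio>

// `next_digit` is the first digit that was NOT printed; `remainder_nonzero`
// says whether any nonzero digits follow it; `last_printed` is the final
// character already written.
bool NeedsRoundUp(int next_digit, bool remainder_nonzero, char last_printed) {
  if (next_digit > 5) return true;
  if (next_digit < 5) return false;
  // Exactly half: ties go to the even digit, unless there is a nonzero tail.
  return remainder_nonzero || ((last_printed - '0') % 2 == 1);
}

int main() {
  std::printf("%d\n", NeedsRoundUp(6, false, '2'));  // 1: 0.26      -> 0.3
  std::printf("%d\n", NeedsRoundUp(5, false, '2'));  // 0: 0.25      -> 0.2 (even)
  std::printf("%d\n", NeedsRoundUp(5, false, '3'));  // 1: 0.35      -> 0.4
  std::printf("%d\n", NeedsRoundUp(5, true,  '2'));  // 1: 0.2500..1 -> 0.3
}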
@@ -1236,24 +1282,25 @@
 }
 
 template <FormatStyle mode, typename Float>
-bool FloatToBuffer(Decomposed<Float> decomposed, int precision, Buffer *out,
-                   int *exp) {
+bool FloatToBuffer(Decomposed<Float> decomposed,
+                   size_t precision,
+                   Buffer* out,
+                   int* exp) {
   if (precision > kMaxFixedPrecision) return false;
 
   // Try with uint64_t.
   if (CanFitMantissa<Float, std::uint64_t>() &&
       FloatToBufferImpl<std::uint64_t, Float, mode>(
-          static_cast<std::uint64_t>(decomposed.mantissa),
-          static_cast<std::uint64_t>(decomposed.exponent), precision, out, exp))
+          static_cast<std::uint64_t>(decomposed.mantissa), decomposed.exponent,
+          precision, out, exp))
     return true;
 
 #if defined(ABSL_HAVE_INTRINSIC_INT128)
   // If that is not enough, try with __uint128_t.
   return CanFitMantissa<Float, __uint128_t>() &&
          FloatToBufferImpl<__uint128_t, Float, mode>(
-             static_cast<__uint128_t>(decomposed.mantissa),
-             static_cast<__uint128_t>(decomposed.exponent), precision, out,
-             exp);
+             static_cast<__uint128_t>(decomposed.mantissa), decomposed.exponent,
+             precision, out, exp);
 #endif
   return false;
 }
@@ -1261,12 +1308,15 @@
 void WriteBufferToSink(char sign_char, absl::string_view str,
                        const FormatConversionSpecImpl &conv,
                        FormatSinkImpl *sink) {
-  int left_spaces = 0, zeros = 0, right_spaces = 0;
-  int missing_chars =
-      conv.width() >= 0 ? std::max(conv.width() - static_cast<int>(str.size()) -
-                                       static_cast<int>(sign_char != 0),
-                                   0)
-                        : 0;
+  size_t left_spaces = 0, zeros = 0, right_spaces = 0;
+  size_t missing_chars = 0;
+  if (conv.width() >= 0) {
+    const size_t conv_width_size_t = static_cast<size_t>(conv.width());
+    const size_t existing_chars =
+        str.size() + static_cast<size_t>(sign_char != 0);
+    if (conv_width_size_t > existing_chars)
+      missing_chars = conv_width_size_t - existing_chars;
+  }
   if (conv.has_left_flag()) {
     right_spaces = missing_chars;
   } else if (conv.has_zero_flag()) {
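
Like several hunks in this file, the rewrite above moves the width math to size_t and performs the subtraction only once the width is known to exceed the characters already produced, so the unsigned value cannot wrap. A minimal sketch of that guard, using a hypothetical helper:

#include <cstddef>
#include <cstdio>
#include <string>

// With size_t arithmetic, `width - existing` may only be evaluated when
// `width > existing`; otherwise the subtraction would wrap around to a huge
// padding count.
size_t MissingChars(int conv_width, const std::string& body, bool has_sign) {
  if (conv_width < 0) return 0;  // no width specified
  const size_t width = static_cast<size_t>(conv_width);
  const size_t existing = body.size() + (has_sign ? 1u : 0u);
  return width > existing ? width - existing : 0;
}

int main() {
  std::printf("%zu\n", MissingChars(8, "3.14", true));    // 3: "-3.14" padded to 8
  std::printf("%zu\n", MissingChars(3, "3.14", true));    // 0: already wider than 3
  std::printf("%zu\n", MissingChars(-1, "3.14", false));  // 0: width unspecified
}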
@@ -1302,7 +1352,8 @@
     return true;
   }
 
-  int precision = conv.precision() < 0 ? 6 : conv.precision();
+  size_t precision =
+      conv.precision() < 0 ? 6 : static_cast<size_t>(conv.precision());
 
   int exp = 0;
 
@@ -1329,12 +1380,12 @@
         &buffer);
   } else if (c == FormatConversionCharInternal::g ||
              c == FormatConversionCharInternal::G) {
-    precision = std::max(0, precision - 1);
+    precision = std::max(precision, size_t{1}) - 1;
     if (!FloatToBuffer<FormatStyle::Precision>(decomposed, precision, &buffer,
                                                &exp)) {
       return FallbackToSnprintf(v, conv, sink);
     }
-    if (precision + 1 > exp && exp >= -4) {
+    if ((exp < 0 || precision + 1 > static_cast<size_t>(exp)) && exp >= -4) {
       if (exp < 0) {
         // Have 1.23456, needs 0.00123456
         // Move the first digit
@@ -1369,9 +1420,11 @@
     return false;
   }
 
-  WriteBufferToSink(sign_char,
-                    absl::string_view(buffer.begin, buffer.end - buffer.begin),
-                    conv, sink);
+  WriteBufferToSink(
+      sign_char,
+      absl::string_view(buffer.begin,
+                        static_cast<size_t>(buffer.end - buffer.begin)),
+      conv, sink);
 
   return true;
 }
@@ -1380,10 +1433,9 @@
 
 bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl &conv,
                       FormatSinkImpl *sink) {
-  if (std::numeric_limits<long double>::digits ==
-      2 * std::numeric_limits<double>::digits) {
-    // This is the `double-double` representation of `long double`.
-    // We do not handle it natively. Fallback to snprintf.
+  if (IsDoubleDouble()) {
+    // This is the `double-double` representation of `long double`. We do not
+    // handle it natively. Fallback to snprintf.
     return FallbackToSnprintf(v, conv, sink);
   }
 
diff --git a/abseil-cpp/absl/strings/internal/str_format/float_conversion.h b/abseil-cpp/absl/strings/internal/str_format/float_conversion.h
index e78bc19..71100e7 100644
--- a/abseil-cpp/absl/strings/internal/str_format/float_conversion.h
+++ b/abseil-cpp/absl/strings/internal/str_format/float_conversion.h
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_
 #define ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_
 
diff --git a/abseil-cpp/absl/strings/internal/str_format/output.h b/abseil-cpp/absl/strings/internal/str_format/output.h
index 8030dae..15e751a 100644
--- a/abseil-cpp/absl/strings/internal/str_format/output.h
+++ b/abseil-cpp/absl/strings/internal/str_format/output.h
@@ -22,6 +22,7 @@
 #define ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_
 
 #include <cstdio>
+#include <ios>
 #include <ostream>
 #include <string>
 
@@ -71,7 +72,7 @@
   out->append(s.data(), s.size());
 }
 inline void AbslFormatFlush(std::ostream* out, string_view s) {
-  out->write(s.data(), s.size());
+  out->write(s.data(), static_cast<std::streamsize>(s.size()));
 }
 
 inline void AbslFormatFlush(FILERawSink* sink, string_view v) {
diff --git a/abseil-cpp/absl/strings/internal/str_format/parser.cc b/abseil-cpp/absl/strings/internal/str_format/parser.cc
index cc55dfa..5aaab69 100644
--- a/abseil-cpp/absl/strings/internal/str_format/parser.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/parser.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/internal/str_format/parser.h"
 
 #include <assert.h>
@@ -17,218 +31,14 @@
 ABSL_NAMESPACE_BEGIN
 namespace str_format_internal {
 
-using CC = FormatConversionCharInternal;
-using LM = LengthMod;
+// Define the array for non-constexpr uses.
+constexpr ConvTag ConvTagHolder::value[256];
 
-ABSL_CONST_INIT const ConvTag kTags[256] = {
-    {},    {},    {},    {},    {},    {},    {},    {},     // 00-07
-    {},    {},    {},    {},    {},    {},    {},    {},     // 08-0f
-    {},    {},    {},    {},    {},    {},    {},    {},     // 10-17
-    {},    {},    {},    {},    {},    {},    {},    {},     // 18-1f
-    {},    {},    {},    {},    {},    {},    {},    {},     // 20-27
-    {},    {},    {},    {},    {},    {},    {},    {},     // 28-2f
-    {},    {},    {},    {},    {},    {},    {},    {},     // 30-37
-    {},    {},    {},    {},    {},    {},    {},    {},     // 38-3f
-    {},    CC::A, {},    {},    {},    CC::E, CC::F, CC::G,  // @ABCDEFG
-    {},    {},    {},    {},    LM::L, {},    {},    {},     // HIJKLMNO
-    {},    {},    {},    {},    {},    {},    {},    {},     // PQRSTUVW
-    CC::X, {},    {},    {},    {},    {},    {},    {},     // XYZ[\]^_
-    {},    CC::a, {},    CC::c, CC::d, CC::e, CC::f, CC::g,  // `abcdefg
-    LM::h, CC::i, LM::j, {},    LM::l, {},    CC::n, CC::o,  // hijklmno
-    CC::p, LM::q, {},    CC::s, LM::t, CC::u, {},    {},     // pqrstuvw
-    CC::x, {},    LM::z, {},    {},    {},    {},    {},     // xyz{|}!
-    {},    {},    {},    {},    {},    {},    {},    {},     // 80-87
-    {},    {},    {},    {},    {},    {},    {},    {},     // 88-8f
-    {},    {},    {},    {},    {},    {},    {},    {},     // 90-97
-    {},    {},    {},    {},    {},    {},    {},    {},     // 98-9f
-    {},    {},    {},    {},    {},    {},    {},    {},     // a0-a7
-    {},    {},    {},    {},    {},    {},    {},    {},     // a8-af
-    {},    {},    {},    {},    {},    {},    {},    {},     // b0-b7
-    {},    {},    {},    {},    {},    {},    {},    {},     // b8-bf
-    {},    {},    {},    {},    {},    {},    {},    {},     // c0-c7
-    {},    {},    {},    {},    {},    {},    {},    {},     // c8-cf
-    {},    {},    {},    {},    {},    {},    {},    {},     // d0-d7
-    {},    {},    {},    {},    {},    {},    {},    {},     // d8-df
-    {},    {},    {},    {},    {},    {},    {},    {},     // e0-e7
-    {},    {},    {},    {},    {},    {},    {},    {},     // e8-ef
-    {},    {},    {},    {},    {},    {},    {},    {},     // f0-f7
-    {},    {},    {},    {},    {},    {},    {},    {},     // f8-ff
-};
-
-namespace {
-
-bool CheckFastPathSetting(const UnboundConversion& conv) {
-  bool should_be_basic = !conv.flags.left &&      //
-                         !conv.flags.show_pos &&  //
-                         !conv.flags.sign_col &&  //
-                         !conv.flags.alt &&       //
-                         !conv.flags.zero &&      //
-                         (conv.width.value() == -1) &&
-                         (conv.precision.value() == -1);
-  if (should_be_basic != conv.flags.basic) {
-    fprintf(stderr,
-            "basic=%d left=%d show_pos=%d sign_col=%d alt=%d zero=%d "
-            "width=%d precision=%d\n",
-            conv.flags.basic, conv.flags.left, conv.flags.show_pos,
-            conv.flags.sign_col, conv.flags.alt, conv.flags.zero,
-            conv.width.value(), conv.precision.value());
-  }
-  return should_be_basic == conv.flags.basic;
+ABSL_ATTRIBUTE_NOINLINE const char* ConsumeUnboundConversionNoInline(
+    const char* p, const char* end, UnboundConversion* conv, int* next_arg) {
+  return ConsumeUnboundConversion(p, end, conv, next_arg);
 }
 
-template <bool is_positional>
-const char *ConsumeConversion(const char *pos, const char *const end,
-                              UnboundConversion *conv, int *next_arg) {
-  const char* const original_pos = pos;
-  char c;
-  // Read the next char into `c` and update `pos`. Returns false if there are
-  // no more chars to read.
-#define ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR()          \
-  do {                                                  \
-    if (ABSL_PREDICT_FALSE(pos == end)) return nullptr; \
-    c = *pos++;                                         \
-  } while (0)
-
-  const auto parse_digits = [&] {
-    int digits = c - '0';
-    // We do not want to overflow `digits` so we consume at most digits10
-    // digits. If there are more digits the parsing will fail later on when the
-    // digit doesn't match the expected characters.
-    int num_digits = std::numeric_limits<int>::digits10;
-    for (;;) {
-      if (ABSL_PREDICT_FALSE(pos == end)) break;
-      c = *pos++;
-      if (!std::isdigit(c)) break;
-      --num_digits;
-      if (ABSL_PREDICT_FALSE(!num_digits)) break;
-      digits = 10 * digits + c - '0';
-    }
-    return digits;
-  };
-
-  if (is_positional) {
-    ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-    if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
-    conv->arg_position = parse_digits();
-    assert(conv->arg_position > 0);
-    if (ABSL_PREDICT_FALSE(c != '$')) return nullptr;
-  }
-
-  ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-
-  // We should start with the basic flag on.
-  assert(conv->flags.basic);
-
-  // Any non alpha character makes this conversion not basic.
-  // This includes flags (-+ #0), width (1-9, *) or precision (.).
-  // All conversion characters and length modifiers are alpha characters.
-  if (c < 'A') {
-    conv->flags.basic = false;
-
-    for (; c <= '0';) {
-      // FIXME: We might be able to speed this up reusing the lookup table from
-      // above. It might require changing Flags to be a plain integer where we
-      // can |= a value.
-      switch (c) {
-        case '-':
-          conv->flags.left = true;
-          break;
-        case '+':
-          conv->flags.show_pos = true;
-          break;
-        case ' ':
-          conv->flags.sign_col = true;
-          break;
-        case '#':
-          conv->flags.alt = true;
-          break;
-        case '0':
-          conv->flags.zero = true;
-          break;
-        default:
-          goto flags_done;
-      }
-      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-    }
-flags_done:
-
-    if (c <= '9') {
-      if (c >= '0') {
-        int maybe_width = parse_digits();
-        if (!is_positional && c == '$') {
-          if (ABSL_PREDICT_FALSE(*next_arg != 0)) return nullptr;
-          // Positional conversion.
-          *next_arg = -1;
-          conv->flags = Flags();
-          conv->flags.basic = true;
-          return ConsumeConversion<true>(original_pos, end, conv, next_arg);
-        }
-        conv->width.set_value(maybe_width);
-      } else if (c == '*') {
-        ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-        if (is_positional) {
-          if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
-          conv->width.set_from_arg(parse_digits());
-          if (ABSL_PREDICT_FALSE(c != '$')) return nullptr;
-          ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-        } else {
-          conv->width.set_from_arg(++*next_arg);
-        }
-      }
-    }
-
-    if (c == '.') {
-      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-      if (std::isdigit(c)) {
-        conv->precision.set_value(parse_digits());
-      } else if (c == '*') {
-        ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-        if (is_positional) {
-          if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
-          conv->precision.set_from_arg(parse_digits());
-          if (c != '$') return nullptr;
-          ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-        } else {
-          conv->precision.set_from_arg(++*next_arg);
-        }
-      } else {
-        conv->precision.set_value(0);
-      }
-    }
-  }
-
-  auto tag = GetTagForChar(c);
-
-  if (ABSL_PREDICT_FALSE(!tag.is_conv())) {
-    if (ABSL_PREDICT_FALSE(!tag.is_length())) return nullptr;
-
-    // It is a length modifier.
-    using str_format_internal::LengthMod;
-    LengthMod length_mod = tag.as_length();
-    ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-    if (c == 'h' && length_mod == LengthMod::h) {
-      conv->length_mod = LengthMod::hh;
-      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-    } else if (c == 'l' && length_mod == LengthMod::l) {
-      conv->length_mod = LengthMod::ll;
-      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
-    } else {
-      conv->length_mod = length_mod;
-    }
-    tag = GetTagForChar(c);
-    if (ABSL_PREDICT_FALSE(!tag.is_conv())) return nullptr;
-  }
-
-  assert(CheckFastPathSetting(*conv));
-  (void)(&CheckFastPathSetting);
-
-  conv->conv = tag.as_conv();
-  if (!is_positional) conv->arg_position = ++*next_arg;
-  return pos;
-}
-
-}  // namespace
-
 std::string LengthModToString(LengthMod v) {
   switch (v) {
     case LengthMod::h:
@@ -255,12 +65,6 @@
   return "";
 }
 
-const char *ConsumeUnboundConversion(const char *p, const char *end,
-                                     UnboundConversion *conv, int *next_arg) {
-  if (*next_arg < 0) return ConsumeConversion<true>(p, end, conv, next_arg);
-  return ConsumeConversion<false>(p, end, conv, next_arg);
-}
-
 struct ParsedFormatBase::ParsedFormatConsumer {
   explicit ParsedFormatConsumer(ParsedFormatBase *parsedformat)
       : parsed(parsedformat), data_pos(parsedformat->data_.get()) {}
@@ -309,11 +113,11 @@
     std::initializer_list<FormatConversionCharSet> convs) const {
   std::unordered_set<int> used;
   auto add_if_valid_conv = [&](int pos, char c) {
-      if (static_cast<size_t>(pos) > convs.size() ||
-          !Contains(convs.begin()[pos - 1], c))
-        return false;
-      used.insert(pos);
-      return true;
+    if (static_cast<size_t>(pos) > convs.size() ||
+        !Contains(convs.begin()[pos - 1], c))
+      return false;
+    used.insert(pos);
+    return true;
   };
   for (const ConversionItem &item : items_) {
     if (!item.is_conversion) continue;
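For illustration, a minimal sketch (editorial, not part of this change; it assumes only the public absl/strings/str_format.h API) of how the conversion-spec parsing relocated by the hunk above surfaces to callers: absl::ParsedFormat validates a format string at runtime and returns null when a spec cannot be parsed or does not match the expected conversion characters.

#include <memory>
#include "absl/strings/str_format.h"

bool DemoParsedFormatValidation() {
  // "%d and %s" parses and matches <'d', 's'>, so New() returns non-null.
  auto ok = absl::ParsedFormat<'d', 's'>::New("%d and %s");
  // "%y" is not a valid conversion character, so New() returns nullptr.
  auto bad = absl::ParsedFormat<'d', 's'>::New("%y and %s");
  return ok != nullptr && bad == nullptr;
}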
diff --git a/abseil-cpp/absl/strings/internal/str_format/parser.h b/abseil-cpp/absl/strings/internal/str_format/parser.h
index fffed04..35b6d49 100644
--- a/abseil-cpp/absl/strings/internal/str_format/parser.h
+++ b/abseil-cpp/absl/strings/internal/str_format/parser.h
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_
 #define ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_
 
@@ -15,103 +29,18 @@
 #include <vector>
 
 #include "absl/strings/internal/str_format/checker.h"
+#include "absl/strings/internal/str_format/constexpr_parser.h"
 #include "absl/strings/internal/str_format/extension.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace str_format_internal {
 
-enum class LengthMod : std::uint8_t { h, hh, l, ll, L, j, z, t, q, none };
-
 std::string LengthModToString(LengthMod v);
 
-// The analyzed properties of a single specified conversion.
-struct UnboundConversion {
-  UnboundConversion()
-      : flags() /* This is required to zero all the fields of flags. */ {
-    flags.basic = true;
-  }
-
-  class InputValue {
-   public:
-    void set_value(int value) {
-      assert(value >= 0);
-      value_ = value;
-    }
-    int value() const { return value_; }
-
-    // Marks the value as "from arg". aka the '*' format.
-    // Requires `value >= 1`.
-    // When set, is_from_arg() return true and get_from_arg() returns the
-    // original value.
-    // `value()`'s return value is unspecfied in this state.
-    void set_from_arg(int value) {
-      assert(value > 0);
-      value_ = -value - 1;
-    }
-    bool is_from_arg() const { return value_ < -1; }
-    int get_from_arg() const {
-      assert(is_from_arg());
-      return -value_ - 1;
-    }
-
-   private:
-    int value_ = -1;
-  };
-
-  // No need to initialize. It will always be set in the parser.
-  int arg_position;
-
-  InputValue width;
-  InputValue precision;
-
-  Flags flags;
-  LengthMod length_mod = LengthMod::none;
-  FormatConversionChar conv = FormatConversionCharInternal::kNone;
-};
-
-// Consume conversion spec prefix (not including '%') of [p, end) if valid.
-// Examples of valid specs would be e.g.: "s", "d", "-12.6f".
-// If valid, it returns the first character following the conversion spec,
-// and the spec part is broken down and returned in 'conv'.
-// If invalid, returns nullptr.
-const char* ConsumeUnboundConversion(const char* p, const char* end,
-                                     UnboundConversion* conv, int* next_arg);
-
-// Helper tag class for the table below.
-// It allows fast `char -> ConversionChar/LengthMod` checking and
-// conversions.
-class ConvTag {
- public:
-  constexpr ConvTag(FormatConversionChar conversion_char)  // NOLINT
-      : tag_(static_cast<int8_t>(conversion_char)) {}
-  // We invert the length modifiers to make them negative so that we can easily
-  // test for them.
-  constexpr ConvTag(LengthMod length_mod)  // NOLINT
-      : tag_(~static_cast<std::int8_t>(length_mod)) {}
-  // Everything else is -128, which is negative to make is_conv() simpler.
-  constexpr ConvTag() : tag_(-128) {}
-
-  bool is_conv() const { return tag_ >= 0; }
-  bool is_length() const { return tag_ < 0 && tag_ != -128; }
-  FormatConversionChar as_conv() const {
-    assert(is_conv());
-    return static_cast<FormatConversionChar>(tag_);
-  }
-  LengthMod as_length() const {
-    assert(is_length());
-    return static_cast<LengthMod>(~tag_);
-  }
-
- private:
-  std::int8_t tag_;
-};
-
-extern const ConvTag kTags[256];
-// Keep a single table for all the conversion chars and length modifiers.
-inline ConvTag GetTagForChar(char c) {
-  return kTags[static_cast<unsigned char>(c)];
-}
+const char* ConsumeUnboundConversionNoInline(const char* p, const char* end,
+                                             UnboundConversion* conv,
+                                             int* next_arg);
 
 // Parse the format string provided in 'src' and pass the identified items into
 // 'consumer'.
@@ -129,13 +58,15 @@
   const char* p = src.data();
   const char* const end = p + src.size();
   while (p != end) {
-    const char* percent = static_cast<const char*>(memchr(p, '%', end - p));
+    const char* percent =
+        static_cast<const char*>(memchr(p, '%', static_cast<size_t>(end - p)));
     if (!percent) {
       // We found the last substring.
-      return consumer.Append(string_view(p, end - p));
+      return consumer.Append(string_view(p, static_cast<size_t>(end - p)));
     }
     // We found a percent, so push the text run then process the percent.
-    if (ABSL_PREDICT_FALSE(!consumer.Append(string_view(p, percent - p)))) {
+    if (ABSL_PREDICT_FALSE(!consumer.Append(
+            string_view(p, static_cast<size_t>(percent - p))))) {
       return false;
     }
     if (ABSL_PREDICT_FALSE(percent + 1 >= end)) return false;
@@ -163,10 +94,11 @@
       }
     } else if (percent[1] != '%') {
       UnboundConversion conv;
-      p = ConsumeUnboundConversion(percent + 1, end, &conv, &next_arg);
+      p = ConsumeUnboundConversionNoInline(percent + 1, end, &conv, &next_arg);
       if (ABSL_PREDICT_FALSE(p == nullptr)) return false;
       if (ABSL_PREDICT_FALSE(!consumer.ConvertOne(
-          conv, string_view(percent + 1, p - (percent + 1))))) {
+              conv, string_view(percent + 1,
+                                static_cast<size_t>(p - (percent + 1)))))) {
         return false;
       }
     } else {
@@ -220,7 +152,8 @@
     string_view text(base, 0);
     for (const auto& item : items_) {
       const char* const end = text.data() + text.size();
-      text = string_view(end, (base + item.text_end) - end);
+      text =
+          string_view(end, static_cast<size_t>((base + item.text_end) - end));
       if (item.is_conversion) {
         if (!consumer.ConvertOne(item.conv, text)) return false;
       } else {
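A small sketch (editorial; assuming only the public absl::StrFormat API) of the spec features that ParseFormatString and ConsumeUnboundConversionNoInline recognize above: flags, width, precision, length modifiers, and POSIX positional arguments.

#include <string>
#include "absl/strings/str_format.h"

std::string DemoSpecFeatures() {
  // Flags, width, precision: left-justify 3.14159 in width 8 with 3 digits.
  std::string flags = absl::StrFormat("%-8.3f|", 3.14159);  // "3.142   |"
  // POSIX positional arguments: arguments are selected by index, not order.
  std::string positional = absl::StrFormat("%2$s %1$s", "world", "hello");
  return flags + " " + positional;  // "3.142   | hello world"
}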
diff --git a/abseil-cpp/absl/strings/internal/str_format/parser_test.cc b/abseil-cpp/absl/strings/internal/str_format/parser_test.cc
index 5aced98..021f6a8 100644
--- a/abseil-cpp/absl/strings/internal/str_format/parser_test.cc
+++ b/abseil-cpp/absl/strings/internal/str_format/parser_test.cc
@@ -1,3 +1,17 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "absl/strings/internal/str_format/parser.h"
 
 #include <string.h>
@@ -96,10 +110,14 @@
     {__LINE__, "ba",   "",     "ba"},  // 'b' is invalid
     {__LINE__, "l",    "",     "l" },  // just length mod isn't okay
     {__LINE__, "d",    "d",    ""  },  // basic
+    {__LINE__, "v",    "v",    ""  },  // basic
     {__LINE__, "d ",   "d",    " " },  // leave suffix
     {__LINE__, "dd",   "d",    "d" },  // don't be greedy
     {__LINE__, "d9",   "d",    "9" },  // leave non-space suffix
     {__LINE__, "dzz",  "d",    "zz"},  // length mod as suffix
+    {__LINE__, "3v",   "",     "3v"},  // 'v' cannot have modifiers
+    {__LINE__, "hv",   "",     "hv"},  // 'v' cannot have modifiers
+    {__LINE__, "1$v",   "1$v",     ""},  // 'v' can have use posix syntax
     {__LINE__, "1$*2$d", "1$*2$d", ""  },  // arg indexing and * allowed.
     {__LINE__, "0-14.3hhd", "0-14.3hhd", ""},  // precision, width
     {__LINE__, " 0-+#14.3hhd", " 0-+#14.3hhd", ""},  // flags
@@ -256,15 +274,22 @@
       for (int k = 0; k < kNumFlags; ++k)
         if ((i >> k) & 1) fmt += kAllFlags[k];
       // flag order shouldn't matter
-      if (rev == 1) { std::reverse(fmt.begin(), fmt.end()); }
+      if (rev == 1) {
+        std::reverse(fmt.begin(), fmt.end());
+      }
       fmt += 'd';
       SCOPED_TRACE(fmt);
       EXPECT_TRUE(Run(fmt.c_str()));
-      EXPECT_EQ(fmt.find('-') == std::string::npos, !o.flags.left);
-      EXPECT_EQ(fmt.find('+') == std::string::npos, !o.flags.show_pos);
-      EXPECT_EQ(fmt.find(' ') == std::string::npos, !o.flags.sign_col);
-      EXPECT_EQ(fmt.find('#') == std::string::npos, !o.flags.alt);
-      EXPECT_EQ(fmt.find('0') == std::string::npos, !o.flags.zero);
+      EXPECT_EQ(fmt.find('-') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kLeft));
+      EXPECT_EQ(fmt.find('+') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kShowPos));
+      EXPECT_EQ(fmt.find(' ') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kSignCol));
+      EXPECT_EQ(fmt.find('#') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kAlt));
+      EXPECT_EQ(fmt.find('0') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kZero));
     }
   }
 }
@@ -274,14 +299,14 @@
   for (const char* fmt : {"d", "llx", "G", "1$X"}) {
     SCOPED_TRACE(fmt);
     EXPECT_TRUE(Run(fmt));
-    EXPECT_TRUE(o.flags.basic);
+    EXPECT_EQ(o.flags, Flags::kBasic);
   }
 
   // Flag is off
   for (const char* fmt : {"3d", ".llx", "-G", "1$#X"}) {
     SCOPED_TRACE(fmt);
     EXPECT_TRUE(Run(fmt));
-    EXPECT_FALSE(o.flags.basic);
+    EXPECT_NE(o.flags, Flags::kBasic);
   }
 }
 
diff --git a/abseil-cpp/absl/strings/internal/str_join_internal.h b/abseil-cpp/absl/strings/internal/str_join_internal.h
index 31dbf67..d97d503 100644
--- a/abseil-cpp/absl/strings/internal/str_join_internal.h
+++ b/abseil-cpp/absl/strings/internal/str_join_internal.h
@@ -229,10 +229,11 @@
   std::string result;
   if (start != end) {
     // Sums size
-    size_t result_size = start->size();
+    auto&& start_value = *start;
+    size_t result_size = start_value.size();
     for (Iterator it = start; ++it != end;) {
       result_size += s.size();
-      result_size += it->size();
+      result_size += (*it).size();
     }
 
     if (result_size > 0) {
@@ -240,13 +241,15 @@
 
       // Joins strings
       char* result_buf = &*result.begin();
-      memcpy(result_buf, start->data(), start->size());
-      result_buf += start->size();
+
+      memcpy(result_buf, start_value.data(), start_value.size());
+      result_buf += start_value.size();
       for (Iterator it = start; ++it != end;) {
         memcpy(result_buf, s.data(), s.size());
         result_buf += s.size();
-        memcpy(result_buf, it->data(), it->size());
-        result_buf += it->size();
+        auto&& value = *it;
+        memcpy(result_buf, value.data(), value.size());
+        result_buf += value.size();
       }
     }
   }
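The hunk above keeps the existing two-pass join but materializes `*start` and `*it` once per element, apparently so iterators whose operator* returns a value rather than a reference are only dereferenced a single time. A self-contained sketch of the same two-pass idea (editorial, not the internal implementation): compute the exact size first so the result is allocated once, then copy pieces and separators into place.

#include <string>
#include <vector>
#include "absl/strings/string_view.h"

std::string JoinTwoPass(const std::vector<absl::string_view>& parts,
                        absl::string_view sep) {
  if (parts.empty()) return std::string();
  // Pass 1: compute the exact output size.
  size_t total = parts[0].size();
  for (size_t i = 1; i < parts.size(); ++i) {
    total += sep.size() + parts[i].size();
  }
  // Pass 2: copy into a single pre-sized allocation.
  std::string result;
  result.reserve(total);
  result.append(parts[0].data(), parts[0].size());
  for (size_t i = 1; i < parts.size(); ++i) {
    result.append(sep.data(), sep.size());
    result.append(parts[i].data(), parts[i].size());
  }
  return result;
}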
diff --git a/abseil-cpp/absl/strings/internal/str_split_internal.h b/abseil-cpp/absl/strings/internal/str_split_internal.h
index 6f5bc09..081ad85 100644
--- a/abseil-cpp/absl/strings/internal/str_split_internal.h
+++ b/abseil-cpp/absl/strings/internal/str_split_internal.h
@@ -32,7 +32,7 @@
 #include <array>
 #include <initializer_list>
 #include <iterator>
-#include <map>
+#include <tuple>
 #include <type_traits>
 #include <utility>
 #include <vector>
@@ -51,9 +51,9 @@
 namespace strings_internal {
 
 // This class is implicitly constructible from everything that absl::string_view
-// is implicitly constructible from. If it's constructed from a temporary
-// string, the data is moved into a data member so its lifetime matches that of
-// the ConvertibleToStringView instance.
+// is implicitly constructible from, except for rvalue strings.  This means it
+// can be used as a function parameter in places where passing a temporary
+// string might cause memory lifetime issues.
 class ConvertibleToStringView {
  public:
   ConvertibleToStringView(const char* s)  // NOLINT(runtime/explicit)
@@ -64,42 +64,13 @@
   ConvertibleToStringView(const std::string& s)  // NOLINT(runtime/explicit)
       : value_(s) {}
 
-  // Matches rvalue strings and moves their data to a member.
-  ConvertibleToStringView(std::string&& s)  // NOLINT(runtime/explicit)
-      : copy_(std::move(s)), value_(copy_) {}
-
-  ConvertibleToStringView(const ConvertibleToStringView& other)
-      : copy_(other.copy_),
-        value_(other.IsSelfReferential() ? copy_ : other.value_) {}
-
-  ConvertibleToStringView(ConvertibleToStringView&& other) {
-    StealMembers(std::move(other));
-  }
-
-  ConvertibleToStringView& operator=(ConvertibleToStringView other) {
-    StealMembers(std::move(other));
-    return *this;
-  }
+  // Disable conversion from rvalue strings.
+  ConvertibleToStringView(std::string&& s) = delete;
+  ConvertibleToStringView(const std::string&& s) = delete;
 
   absl::string_view value() const { return value_; }
 
  private:
-  // Returns true if ctsp's value refers to its internal copy_ member.
-  bool IsSelfReferential() const { return value_.data() == copy_.data(); }
-
-  void StealMembers(ConvertibleToStringView&& other) {
-    if (other.IsSelfReferential()) {
-      copy_ = std::move(other.copy_);
-      value_ = copy_;
-      other.value_ = other.copy_;
-    } else {
-      value_ = other.value_;
-    }
-  }
-
-  // Holds the data moved from temporary std::string arguments. Declared first
-  // so that 'value' can refer to 'copy_'.
-  std::string copy_;
   absl::string_view value_;
 };
 
@@ -161,7 +132,8 @@
       const absl::string_view text = splitter_->text();
       const absl::string_view d = delimiter_.Find(text, pos_);
       if (d.data() == text.data() + text.size()) state_ = kLastState;
-      curr_ = text.substr(pos_, d.data() - (text.data() + pos_));
+      curr_ = text.substr(pos_,
+                          static_cast<size_t>(d.data() - (text.data() + pos_)));
       pos_ += curr_.size() + d.size();
     } while (!predicate_(curr_));
     return *this;
@@ -211,6 +183,13 @@
 struct HasConstIterator<T, absl::void_t<typename T::const_iterator>>
     : std::true_type {};
 
+// HasEmplace<T>::value is true iff there exists a method T::emplace().
+template <typename T, typename = void>
+struct HasEmplace : std::false_type {};
+template <typename T>
+struct HasEmplace<T, absl::void_t<decltype(std::declval<T>().emplace())>>
+    : std::true_type {};
+
 // IsInitializerList<T>::value is true iff T is an std::initializer_list. More
 // details below in Splitter<> where this is used.
 std::false_type IsInitializerListDispatch(...);  // default: No
@@ -256,6 +235,24 @@
           HasMappedType<C>::value> {
 };
 
+template <typename StringType, typename Container, typename = void>
+struct ShouldUseLifetimeBound : std::false_type {};
+
+template <typename StringType, typename Container>
+struct ShouldUseLifetimeBound<
+    StringType, Container,
+    std::enable_if_t<
+        std::is_same<StringType, std::string>::value &&
+        std::is_same<typename Container::value_type, absl::string_view>::value>>
+    : std::true_type {};
+
+template <typename StringType, typename First, typename Second>
+using ShouldUseLifetimeBoundForPair = std::integral_constant<
+    bool, std::is_same<StringType, std::string>::value &&
+              (std::is_same<First, absl::string_view>::value ||
+               std::is_same<Second, absl::string_view>::value)>;
+
+
 // This class implements the range that is returned by absl::StrSplit(). This
 // class has templated conversion operators that allow it to be implicitly
 // converted to a variety of types that the caller may have specified on the
@@ -273,7 +270,11 @@
 // the split strings: only strings for which the predicate returns true will be
 // kept. A Predicate object is any unary functor that takes an absl::string_view
 // and returns bool.
-template <typename Delimiter, typename Predicate>
+//
+// The StringType parameter can be either string_view or string, depending on
+// whether the Splitter refers to a string stored elsewhere, or if the string
+// resides inside the Splitter itself.
+template <typename Delimiter, typename Predicate, typename StringType>
 class Splitter {
  public:
   using DelimiterType = Delimiter;
@@ -281,12 +282,12 @@
   using const_iterator = strings_internal::SplitIterator<Splitter>;
   using value_type = typename std::iterator_traits<const_iterator>::value_type;
 
-  Splitter(ConvertibleToStringView input_text, Delimiter d, Predicate p)
+  Splitter(StringType input_text, Delimiter d, Predicate p)
       : text_(std::move(input_text)),
         delimiter_(std::move(d)),
         predicate_(std::move(p)) {}
 
-  absl::string_view text() const { return text_.value(); }
+  absl::string_view text() const { return text_; }
   const Delimiter& delimiter() const { return delimiter_; }
   const Predicate& predicate() const { return predicate_; }
 
@@ -298,10 +299,24 @@
 
   // An implicit conversion operator that is restricted to only those containers
   // that the splitter is convertible to.
-  template <typename Container,
-            typename = typename std::enable_if<
-                SplitterIsConvertibleTo<Container>::value>::type>
-  operator Container() const {  // NOLINT(runtime/explicit)
+  template <
+      typename Container,
+      std::enable_if_t<ShouldUseLifetimeBound<StringType, Container>::value &&
+                           SplitterIsConvertibleTo<Container>::value,
+                       std::nullptr_t> = nullptr>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  operator Container() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return ConvertToContainer<Container, typename Container::value_type,
+                              HasMappedType<Container>::value>()(*this);
+  }
+
+  template <
+      typename Container,
+      std::enable_if_t<!ShouldUseLifetimeBound<StringType, Container>::value &&
+                           SplitterIsConvertibleTo<Container>::value,
+                       std::nullptr_t> = nullptr>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  operator Container() const {
     return ConvertToContainer<Container, typename Container::value_type,
                               HasMappedType<Container>::value>()(*this);
   }
@@ -310,8 +325,27 @@
   // strings returned by the begin() iterator. Either/both of .first and .second
   // will be constructed with empty strings if the iterator doesn't have a
   // corresponding value.
+  template <typename First, typename Second,
+            std::enable_if_t<
+                ShouldUseLifetimeBoundForPair<StringType, First, Second>::value,
+                std::nullptr_t> = nullptr>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  operator std::pair<First, Second>() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return ConvertToPair<First, Second>();
+  }
+
+  template <typename First, typename Second,
+            std::enable_if_t<!ShouldUseLifetimeBoundForPair<StringType, First,
+                                                            Second>::value,
+                             std::nullptr_t> = nullptr>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  operator std::pair<First, Second>() const {
+    return ConvertToPair<First, Second>();
+  }
+
+ private:
   template <typename First, typename Second>
-  operator std::pair<First, Second>() const {  // NOLINT(runtime/explicit)
+  std::pair<First, Second> ConvertToPair() const {
     absl::string_view first, second;
     auto it = begin();
     if (it != end()) {
@@ -323,7 +357,6 @@
     return {First(first), Second(second)};
   }
 
- private:
   // ConvertToContainer is a functor converting a Splitter to the requested
   // Container of ValueType. It is specialized below to optimize splitting to
   // certain combinations of Container and ValueType.
@@ -336,7 +369,7 @@
     Container operator()(const Splitter& splitter) const {
       Container c;
       auto it = std::inserter(c, c.end());
-      for (const auto sp : splitter) {
+      for (const auto& sp : splitter) {
         *it++ = ValueType(sp);
       }
       return c;
@@ -397,53 +430,46 @@
   // value.
   template <typename Container, typename First, typename Second>
   struct ConvertToContainer<Container, std::pair<const First, Second>, true> {
+    using iterator = typename Container::iterator;
+
     Container operator()(const Splitter& splitter) const {
       Container m;
-      typename Container::iterator it;
+      iterator it;
       bool insert = true;
-      for (const auto sp : splitter) {
+      for (const absl::string_view sv : splitter) {
         if (insert) {
-          it = Inserter<Container>::Insert(&m, First(sp), Second());
+          it = InsertOrEmplace(&m, sv);
         } else {
-          it->second = Second(sp);
+          it->second = Second(sv);
         }
         insert = !insert;
       }
       return m;
     }
 
-    // Inserts the key and value into the given map, returning an iterator to
-    // the inserted item. Specialized for std::map and std::multimap to use
-    // emplace() and adapt emplace()'s return value.
-    template <typename Map>
-    struct Inserter {
-      using M = Map;
-      template <typename... Args>
-      static typename M::iterator Insert(M* m, Args&&... args) {
-        return m->insert(std::make_pair(std::forward<Args>(args)...)).first;
-      }
-    };
+    // Inserts the key and an empty value into the map, returning an iterator to
+    // the inserted item. We use emplace() if available, otherwise insert().
+    template <typename M>
+    static absl::enable_if_t<HasEmplace<M>::value, iterator> InsertOrEmplace(
+        M* m, absl::string_view key) {
+      // Use piecewise_construct to support old versions of gcc in which pair
+      // constructor can't otherwise construct string from string_view.
+      return ToIter(m->emplace(std::piecewise_construct, std::make_tuple(key),
+                               std::tuple<>()));
+    }
+    template <typename M>
+    static absl::enable_if_t<!HasEmplace<M>::value, iterator> InsertOrEmplace(
+        M* m, absl::string_view key) {
+      return ToIter(m->insert(std::make_pair(First(key), Second(""))));
+    }
 
-    template <typename... Ts>
-    struct Inserter<std::map<Ts...>> {
-      using M = std::map<Ts...>;
-      template <typename... Args>
-      static typename M::iterator Insert(M* m, Args&&... args) {
-        return m->emplace(std::make_pair(std::forward<Args>(args)...)).first;
-      }
-    };
-
-    template <typename... Ts>
-    struct Inserter<std::multimap<Ts...>> {
-      using M = std::multimap<Ts...>;
-      template <typename... Args>
-      static typename M::iterator Insert(M* m, Args&&... args) {
-        return m->emplace(std::make_pair(std::forward<Args>(args)...));
-      }
-    };
+    static iterator ToIter(std::pair<iterator, bool> pair) {
+      return pair.first;
+    }
+    static iterator ToIter(iterator iter) { return iter; }
   };
 
-  ConvertibleToStringView text_;
+  StringType text_;
   Delimiter delimiter_;
   Predicate predicate_;
 };
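A usage sketch (editorial; assuming the public absl::StrSplit API) of the lifetime distinction the templated Splitter<..., StringType> above encodes: splitting into owning std::string elements is always safe, while splitting a temporary std::string into absl::string_view elements would dangle, which the deleted rvalue ConvertibleToStringView constructor and the ABSL_ATTRIBUTE_LIFETIME_BOUND conversions are there to catch.

#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"

void DemoStrSplitLifetimes() {
  const std::string csv = "a,b,c";
  // Fine: views into a string that outlives them.
  std::vector<absl::string_view> views = absl::StrSplit(csv, ',');
  // Fine: owning copies, so the source's lifetime does not matter.
  std::vector<std::string> owned = absl::StrSplit(std::string("x,y"), ',');
  // Pairs follow the same rules.
  std::pair<std::string, std::string> kv = absl::StrSplit("key=value", '=');
  (void)views;
  (void)owned;
  (void)kv;
}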
diff --git a/abseil-cpp/absl/strings/internal/string_constant.h b/abseil-cpp/absl/strings/internal/string_constant.h
new file mode 100644
index 0000000..f68b17d
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/string_constant.h
@@ -0,0 +1,72 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+#define ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// StringConstant<T> represents a compile time string constant.
+// It can be accessed via its `absl::string_view value` static member.
+// It is guaranteed that the `string_view` returned has constant `.data()`,
+// constant `.size()` and constant `value[i]` for all `0 <= i < .size()`
+//
+// The `T` is an opaque type. It is guaranteed that different string constants
+// will have different values of `T`. This allows users to associate the string
+// constant with other static state at compile time.
+//
+// Instances should be made using the `MakeStringConstant()` factory function
+// below.
+template <typename T>
+struct StringConstant {
+ private:
+  static constexpr bool TryConstexprEval(absl::string_view view) {
+    return view.empty() || 2 * view[0] != 1;
+  }
+
+ public:
+  static constexpr absl::string_view value = T{}();
+  constexpr absl::string_view operator()() const { return value; }
+
+  // Check to be sure `view` points to constant data.
+  // Otherwise, it can't be constant evaluated.
+  static_assert(TryConstexprEval(value),
+                "The input string_view must point to constant data.");
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+template <typename T>
+constexpr absl::string_view StringConstant<T>::value;
+#endif
+
+// Factory function for `StringConstant` instances.
+// It supports callables that have a constexpr default constructor and a
+// constexpr operator().
+// It must return an `absl::string_view` or `const char*` pointing to constant
+// data. This is validated at compile time.
+template <typename T>
+constexpr StringConstant<T> MakeStringConstant(T) {
+  return {};
+}
+
+}  // namespace strings_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
diff --git a/abseil-cpp/absl/strings/internal/string_constant_test.cc b/abseil-cpp/absl/strings/internal/string_constant_test.cc
new file mode 100644
index 0000000..392833c
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/string_constant_test.cc
@@ -0,0 +1,60 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/string_constant.h"
+
+#include "absl/meta/type_traits.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::strings_internal::MakeStringConstant;
+
+struct Callable {
+  constexpr absl::string_view operator()() const {
+    return absl::string_view("Callable", 8);
+  }
+};
+
+TEST(StringConstant, Traits) {
+  constexpr auto str = MakeStringConstant(Callable{});
+  using T = decltype(str);
+
+  EXPECT_TRUE(std::is_empty<T>::value);
+  EXPECT_TRUE(std::is_trivial<T>::value);
+  EXPECT_TRUE(absl::is_trivially_default_constructible<T>::value);
+  EXPECT_TRUE(absl::is_trivially_copy_constructible<T>::value);
+  EXPECT_TRUE(absl::is_trivially_move_constructible<T>::value);
+  EXPECT_TRUE(absl::is_trivially_destructible<T>::value);
+}
+
+TEST(StringConstant, MakeFromCallable) {
+  constexpr auto str = MakeStringConstant(Callable{});
+  using T = decltype(str);
+  EXPECT_EQ(Callable{}(), T::value);
+  EXPECT_EQ(Callable{}(), str());
+}
+
+TEST(StringConstant, MakeFromStringConstant) {
+  // We want to make sure the StringConstant itself is a valid input to the
+  // factory function.
+  constexpr auto str = MakeStringConstant(Callable{});
+  constexpr auto str2 = MakeStringConstant(str);
+  using T = decltype(str2);
+  EXPECT_EQ(Callable{}(), T::value);
+  EXPECT_EQ(Callable{}(), str2());
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/strings/internal/stringify_sink.cc b/abseil-cpp/absl/strings/internal/stringify_sink.cc
new file mode 100644
index 0000000..7c6995a
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/stringify_sink.cc
@@ -0,0 +1,28 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/stringify_sink.h"
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+void StringifySink::Append(size_t count, char ch) { buffer_.append(count, ch); }
+
+void StringifySink::Append(string_view v) {
+  buffer_.append(v.data(), v.size());
+}
+
+}  // namespace strings_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/strings/internal/stringify_sink.h b/abseil-cpp/absl/strings/internal/stringify_sink.h
new file mode 100644
index 0000000..fc3747b
--- /dev/null
+++ b/abseil-cpp/absl/strings/internal/stringify_sink.h
@@ -0,0 +1,57 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_
+#define ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_
+
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace strings_internal {
+class StringifySink {
+ public:
+  void Append(size_t count, char ch);
+
+  void Append(string_view v);
+
+  // Support `absl::Format(&sink, format, args...)`.
+  friend void AbslFormatFlush(StringifySink* sink, absl::string_view v) {
+    sink->Append(v);
+  }
+
+ private:
+  template <typename T>
+  friend string_view ExtractStringification(StringifySink& sink, const T& v);
+
+  std::string buffer_;
+};
+
+template <typename T>
+string_view ExtractStringification(StringifySink& sink, const T& v) {
+  AbslStringify(sink, v);
+  return sink.buffer_;
+}
+
+}  // namespace strings_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_
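A sketch (editorial; assuming the AbslStringify extension point as documented for absl::StrCat) of what the StringifySink above enables: a user type defines a friend AbslStringify() templated on the sink, and absl string routines can then format it through a sink much like StringifySink.

#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"

struct Point {
  int x = 0;
  int y = 0;
  // The extension point: any sink that supports AbslFormatFlush() (such as
  // the StringifySink above) can be passed to absl::Format().
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    absl::Format(&sink, "(%d, %d)", p.x, p.y);
  }
};

std::string DemoStringify() {
  return absl::StrCat("point = ", Point{1, 2});  // "point = (1, 2)"
}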
diff --git a/abseil-cpp/absl/strings/internal/utf8.cc b/abseil-cpp/absl/strings/internal/utf8.cc
index 8fd8edc..7ecb93d 100644
--- a/abseil-cpp/absl/strings/internal/utf8.cc
+++ b/abseil-cpp/absl/strings/internal/utf8.cc
@@ -25,25 +25,25 @@
     *buffer = static_cast<char>(utf8_char);
     return 1;
   } else if (utf8_char <= 0x7FF) {
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xC0 | utf8_char;
+    buffer[0] = static_cast<char>(0xC0 | utf8_char);
     return 2;
   } else if (utf8_char <= 0xFFFF) {
-    buffer[2] = 0x80 | (utf8_char & 0x3F);
+    buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xE0 | utf8_char;
+    buffer[0] = static_cast<char>(0xE0 | utf8_char);
     return 3;
   } else {
-    buffer[3] = 0x80 | (utf8_char & 0x3F);
+    buffer[3] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[2] = 0x80 | (utf8_char & 0x3F);
+    buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xF0 | utf8_char;
+    buffer[0] = static_cast<char>(0xF0 | utf8_char);
     return 4;
   }
 }
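An editorial worked example of the 3-byte branch above, for U+20AC (the euro sign): byte 2 is 0x80 | (0x20AC & 0x3F) = 0xAC, byte 1 is 0x80 | ((0x20AC >> 6) & 0x3F) = 0x82, and byte 0 is 0xE0 | (0x20AC >> 12) = 0xE2, giving E2 82 AC, the standard UTF-8 encoding of U+20AC. A minimal standalone sketch of the same 3-byte case (not the library code):

#include <cstdint>

// Encode a code point in [0x0800, 0xFFFF] as three UTF-8 bytes.
inline void EncodeThreeByteUtf8(uint32_t cp, char out[3]) {
  out[2] = static_cast<char>(0x80 | (cp & 0x3F));
  out[1] = static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
  out[0] = static_cast<char>(0xE0 | (cp >> 12));
}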
diff --git a/abseil-cpp/absl/strings/match.cc b/abseil-cpp/absl/strings/match.cc
index 8127cb0..3b81b2c 100644
--- a/abseil-cpp/absl/strings/match.cc
+++ b/abseil-cpp/absl/strings/match.cc
@@ -14,27 +14,117 @@
 
 #include "absl/strings/match.h"
 
+#include <algorithm>
+#include <cstdint>
+
+#include "absl/base/internal/endian.h"
+#include "absl/numeric/bits.h"
+#include "absl/strings/ascii.h"
 #include "absl/strings/internal/memutil.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-bool EqualsIgnoreCase(absl::string_view piece1, absl::string_view piece2) {
+bool EqualsIgnoreCase(absl::string_view piece1,
+                      absl::string_view piece2) noexcept {
   return (piece1.size() == piece2.size() &&
           0 == absl::strings_internal::memcasecmp(piece1.data(), piece2.data(),
                                                   piece1.size()));
   // memcasecmp uses absl::ascii_tolower().
 }
 
-bool StartsWithIgnoreCase(absl::string_view text, absl::string_view prefix) {
+bool StrContainsIgnoreCase(absl::string_view haystack,
+                           absl::string_view needle) noexcept {
+  while (haystack.size() >= needle.size()) {
+    if (StartsWithIgnoreCase(haystack, needle)) return true;
+    haystack.remove_prefix(1);
+  }
+  return false;
+}
+
+bool StrContainsIgnoreCase(absl::string_view haystack,
+                           char needle) noexcept {
+  char upper_needle = absl::ascii_toupper(static_cast<unsigned char>(needle));
+  char lower_needle = absl::ascii_tolower(static_cast<unsigned char>(needle));
+  if (upper_needle == lower_needle) {
+    return StrContains(haystack, needle);
+  } else {
+    const char both_cstr[3] = {lower_needle, upper_needle, '\0'};
+    return haystack.find_first_of(both_cstr) != absl::string_view::npos;
+  }
+}
+
+bool StartsWithIgnoreCase(absl::string_view text,
+                          absl::string_view prefix) noexcept {
   return (text.size() >= prefix.size()) &&
          EqualsIgnoreCase(text.substr(0, prefix.size()), prefix);
 }
 
-bool EndsWithIgnoreCase(absl::string_view text, absl::string_view suffix) {
+bool EndsWithIgnoreCase(absl::string_view text,
+                        absl::string_view suffix) noexcept {
   return (text.size() >= suffix.size()) &&
          EqualsIgnoreCase(text.substr(text.size() - suffix.size()), suffix);
 }
 
+absl::string_view FindLongestCommonPrefix(absl::string_view a,
+                                          absl::string_view b) {
+  const absl::string_view::size_type limit = std::min(a.size(), b.size());
+  const char* const pa = a.data();
+  const char* const pb = b.data();
+  absl::string_view::size_type count = (unsigned) 0;
+
+  if (ABSL_PREDICT_FALSE(limit < 8)) {
+    while (ABSL_PREDICT_TRUE(count + 2 <= limit)) {
+      uint16_t xor_bytes = absl::little_endian::Load16(pa + count) ^
+                           absl::little_endian::Load16(pb + count);
+      if (ABSL_PREDICT_FALSE(xor_bytes != 0)) {
+        if (ABSL_PREDICT_TRUE((xor_bytes & 0xff) == 0)) ++count;
+        return absl::string_view(pa, count);
+      }
+      count += 2;
+    }
+    if (ABSL_PREDICT_TRUE(count != limit)) {
+      if (ABSL_PREDICT_TRUE(pa[count] == pb[count])) ++count;
+    }
+    return absl::string_view(pa, count);
+  }
+
+  do {
+    uint64_t xor_bytes = absl::little_endian::Load64(pa + count) ^
+                         absl::little_endian::Load64(pb + count);
+    if (ABSL_PREDICT_FALSE(xor_bytes != 0)) {
+      count += static_cast<uint64_t>(absl::countr_zero(xor_bytes) >> 3);
+      return absl::string_view(pa, count);
+    }
+    count += 8;
+  } while (ABSL_PREDICT_TRUE(count + 8 < limit));
+
+  count = limit - 8;
+  uint64_t xor_bytes = absl::little_endian::Load64(pa + count) ^
+                       absl::little_endian::Load64(pb + count);
+  if (ABSL_PREDICT_TRUE(xor_bytes != 0)) {
+    count += static_cast<uint64_t>(absl::countr_zero(xor_bytes) >> 3);
+    return absl::string_view(pa, count);
+  }
+  return absl::string_view(pa, limit);
+}
+
+absl::string_view FindLongestCommonSuffix(absl::string_view a,
+                                          absl::string_view b) {
+  const absl::string_view::size_type limit = std::min(a.size(), b.size());
+  if (limit == 0) return absl::string_view();
+
+  const char* pa = a.data() + a.size() - 1;
+  const char* pb = b.data() + b.size() - 1;
+  absl::string_view::size_type count = (unsigned) 0;
+  while (count < limit && *pa == *pb) {
+    --pa;
+    --pb;
+    ++count;
+  }
+
+  return absl::string_view(++pa, count);
+}
+
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/strings/match.h b/abseil-cpp/absl/strings/match.h
index 90fca98..1eeafbb 100644
--- a/abseil-cpp/absl/strings/match.h
+++ b/abseil-cpp/absl/strings/match.h
@@ -43,14 +43,20 @@
 // StrContains()
 //
 // Returns whether a given string `haystack` contains the substring `needle`.
-inline bool StrContains(absl::string_view haystack, absl::string_view needle) {
+inline bool StrContains(absl::string_view haystack,
+                        absl::string_view needle) noexcept {
   return haystack.find(needle, 0) != haystack.npos;
 }
 
+inline bool StrContains(absl::string_view haystack, char needle) noexcept {
+  return haystack.find(needle) != haystack.npos;
+}
+
 // StartsWith()
 //
 // Returns whether a given string `text` begins with `prefix`.
-inline bool StartsWith(absl::string_view text, absl::string_view prefix) {
+inline bool StartsWith(absl::string_view text,
+                       absl::string_view prefix) noexcept {
   return prefix.empty() ||
          (text.size() >= prefix.size() &&
           memcmp(text.data(), prefix.data(), prefix.size()) == 0);
@@ -59,30 +65,53 @@
 // EndsWith()
 //
 // Returns whether a given string `text` ends with `suffix`.
-inline bool EndsWith(absl::string_view text, absl::string_view suffix) {
+inline bool EndsWith(absl::string_view text,
+                     absl::string_view suffix) noexcept {
   return suffix.empty() ||
          (text.size() >= suffix.size() &&
           memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
                  suffix.size()) == 0);
 }
+// StrContainsIgnoreCase()
+//
+// Returns whether a given ASCII string `haystack` contains the ASCII substring
+// `needle`, ignoring case in the comparison.
+bool StrContainsIgnoreCase(absl::string_view haystack,
+                           absl::string_view needle) noexcept;
+
+bool StrContainsIgnoreCase(absl::string_view haystack,
+                           char needle) noexcept;
 
 // EqualsIgnoreCase()
 //
 // Returns whether given ASCII strings `piece1` and `piece2` are equal, ignoring
 // case in the comparison.
-bool EqualsIgnoreCase(absl::string_view piece1, absl::string_view piece2);
+bool EqualsIgnoreCase(absl::string_view piece1,
+                      absl::string_view piece2) noexcept;
 
 // StartsWithIgnoreCase()
 //
 // Returns whether a given ASCII string `text` starts with `prefix`,
 // ignoring case in the comparison.
-bool StartsWithIgnoreCase(absl::string_view text, absl::string_view prefix);
+bool StartsWithIgnoreCase(absl::string_view text,
+                          absl::string_view prefix) noexcept;
 
 // EndsWithIgnoreCase()
 //
 // Returns whether a given ASCII string `text` ends with `suffix`, ignoring
 // case in the comparison.
-bool EndsWithIgnoreCase(absl::string_view text, absl::string_view suffix);
+bool EndsWithIgnoreCase(absl::string_view text,
+                        absl::string_view suffix) noexcept;
+
+// Yields the longest prefix in common between both input strings.
+// Pointer-wise, the returned result is a subset of input "a".
+absl::string_view FindLongestCommonPrefix(absl::string_view a,
+                                          absl::string_view b);
+
+// Yields the longest suffix in common between both input strings.
+// Pointer-wise, the returned result is a subset of input "a".
+absl::string_view FindLongestCommonSuffix(absl::string_view a,
+                                          absl::string_view b);
 
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/strings/match_test.cc b/abseil-cpp/absl/strings/match_test.cc
index 4c313dd..71618f7 100644
--- a/abseil-cpp/absl/strings/match_test.cc
+++ b/abseil-cpp/absl/strings/match_test.cc
@@ -66,6 +66,23 @@
   EXPECT_FALSE(absl::StrContains("", "a"));
 }
 
+TEST(MatchTest, ContainsChar) {
+  absl::string_view a("abcdefg");
+  absl::string_view b("abcd");
+  EXPECT_TRUE(absl::StrContains(a, 'a'));
+  EXPECT_TRUE(absl::StrContains(a, 'b'));
+  EXPECT_TRUE(absl::StrContains(a, 'e'));
+  EXPECT_FALSE(absl::StrContains(a, 'h'));
+
+  EXPECT_TRUE(absl::StrContains(b, 'a'));
+  EXPECT_TRUE(absl::StrContains(b, 'b'));
+  EXPECT_FALSE(absl::StrContains(b, 'e'));
+  EXPECT_FALSE(absl::StrContains(b, 'h'));
+
+  EXPECT_FALSE(absl::StrContains("", 'a'));
+  EXPECT_FALSE(absl::StrContains("", 'a'));
+}
+
 TEST(MatchTest, ContainsNull) {
   const std::string s = "foo";
   const char* cs = "foo";
@@ -107,4 +124,165 @@
   EXPECT_FALSE(absl::EndsWithIgnoreCase("", "fo"));
 }
 
+TEST(MatchTest, ContainsIgnoreCase) {
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("foo", "foo"));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("FOO", "Foo"));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("--FOO", "Foo"));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("FOO--", "Foo"));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase("BAR", "Foo"));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase("BAR", "Foo"));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("123456", "123456"));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("123456", "234"));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("", ""));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase("abc", ""));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase("", "a"));
+}
+
+TEST(MatchTest, ContainsCharIgnoreCase) {
+  absl::string_view a("AaBCdefg!");
+  absl::string_view b("AaBCd!");
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'a'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'A'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'b'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'B'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'e'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'E'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(a, 'h'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(a, 'H'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(a, '!'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(a, '?'));
+
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'a'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'A'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'b'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'B'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'e'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'E'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'h'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'H'));
+  EXPECT_TRUE(absl::StrContainsIgnoreCase(b, '!'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase(b, '?'));
+
+  EXPECT_FALSE(absl::StrContainsIgnoreCase("", 'a'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase("", 'A'));
+  EXPECT_FALSE(absl::StrContainsIgnoreCase("", '0'));
+}
+
+TEST(MatchTest, FindLongestCommonPrefix) {
+  EXPECT_EQ(absl::FindLongestCommonPrefix("", ""), "");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("", "abc"), "");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("abc", ""), "");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("ab", "abc"), "ab");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("abc", "ab"), "ab");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("abc", "abd"), "ab");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("abc", "abcd"), "abc");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("abcd", "abcd"), "abcd");
+  EXPECT_EQ(absl::FindLongestCommonPrefix("abcd", "efgh"), "");
+
+  // "abcde" v. "abc" but in the middle of other data
+  EXPECT_EQ(absl::FindLongestCommonPrefix(
+                absl::string_view("1234 abcdef").substr(5, 5),
+                absl::string_view("5678 abcdef").substr(5, 3)),
+            "abc");
+}
+
+// Since the little-endian implementation involves a bit of if-else and various
+// return paths, the following tests aim to provide full test coverage of the
+// implementation.
+TEST(MatchTest, FindLongestCommonPrefixLoad16Mismatch) {
+  const std::string x1 = "abcdefgh";
+  const std::string x2 = "abcde_";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcde");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcde");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLoad16MatchesNoLast) {
+  const std::string x1 = "abcdef";
+  const std::string x2 = "abcdef";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcdef");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcdef");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLoad16MatchesLastCharMismatches) {
+  const std::string x1 = "abcdefg";
+  const std::string x2 = "abcdef_h";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcdef");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcdef");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLoad16MatchesLastMatches) {
+  const std::string x1 = "abcde";
+  const std::string x2 = "abcdefgh";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcde");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcde");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize8Load64Mismatches) {
+  const std::string x1 = "abcdefghijk";
+  const std::string x2 = "abcde_g_";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcde");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcde");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize8Load64Matches) {
+  const std::string x1 = "abcdefgh";
+  const std::string x2 = "abcdefgh";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcdefgh");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcdefgh");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize15Load64Mismatches) {
+  const std::string x1 = "012345670123456";
+  const std::string x2 = "0123456701_34_6";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "0123456701");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "0123456701");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize15Load64Matches) {
+  const std::string x1 = "012345670123456";
+  const std::string x2 = "0123456701234567";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "012345670123456");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "012345670123456");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSizeFirstByteOfLast8BytesMismatch) {
+  const std::string x1 = "012345670123456701234567";
+  const std::string x2 = "0123456701234567_1234567";
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "0123456701234567");
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "0123456701234567");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLargeLastCharMismatches) {
+  const std::string x1(300, 'x');
+  std::string x2 = x1;
+  x2.back() = '#';
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), std::string(299, 'x'));
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), std::string(299, 'x'));
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLargeFullMatch) {
+  const std::string x1(300, 'x');
+  const std::string x2 = x1;
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), std::string(300, 'x'));
+  EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), std::string(300, 'x'));
+}
+
+TEST(MatchTest, FindLongestCommonSuffix) {
+  EXPECT_EQ(absl::FindLongestCommonSuffix("", ""), "");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("", "abc"), "");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("abc", ""), "");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("bc", "abc"), "bc");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("abc", "bc"), "bc");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("abc", "dbc"), "bc");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("bcd", "abcd"), "bcd");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("abcd", "abcd"), "abcd");
+  EXPECT_EQ(absl::FindLongestCommonSuffix("abcd", "efgh"), "");
+
+  // "abcde" v. "cde" but in the middle of other data
+  EXPECT_EQ(absl::FindLongestCommonSuffix(
+                absl::string_view("1234 abcdef").substr(5, 5),
+                absl::string_view("5678 abcdef").substr(7, 3)),
+            "cde");
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/strings/numbers.cc b/abseil-cpp/absl/strings/numbers.cc
index 68c26dd..c43c6bc 100644
--- a/abseil-cpp/absl/strings/numbers.cc
+++ b/abseil-cpp/absl/strings/numbers.cc
@@ -31,8 +31,10 @@
 #include <utility>
 
 #include "absl/base/attributes.h"
-#include "absl/base/internal/bits.h"
+#include "absl/base/internal/endian.h"
 #include "absl/base/internal/raw_logging.h"
+#include "absl/base/optimization.h"
+#include "absl/numeric/bits.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/charconv.h"
 #include "absl/strings/escaping.h"
@@ -46,8 +48,13 @@
 bool SimpleAtof(absl::string_view str, float* out) {
   *out = 0.0;
   str = StripAsciiWhitespace(str);
+  // std::from_chars doesn't accept an initial +, but SimpleAtof does, so if one
+  // is present, skip it, while avoiding accepting "+-0" as valid.
   if (!str.empty() && str[0] == '+') {
     str.remove_prefix(1);
+    if (!str.empty() && str[0] == '-') {
+      return false;
+    }
   }
   auto result = absl::from_chars(str.data(), str.data() + str.size(), *out);
   if (result.ec == std::errc::invalid_argument) {
@@ -72,8 +79,13 @@
 bool SimpleAtod(absl::string_view str, double* out) {
   *out = 0.0;
   str = StripAsciiWhitespace(str);
+  // std::from_chars doesn't accept an initial +, but SimpleAtod does, so if one
+  // is present, skip it, while avoiding accepting "+-0" as valid.
   if (!str.empty() && str[0] == '+') {
     str.remove_prefix(1);
+    if (!str.empty() && str[0] == '-') {
+      return false;
+    }
   }
   auto result = absl::from_chars(str.data(), str.data() + str.size(), *out);
   if (result.ec == std::errc::invalid_argument) {
@@ -126,90 +138,140 @@
 
 namespace {
 
-// Used to optimize printing a decimal number's final digit.
-const char one_ASCII_final_digits[10][2] {
-  {'0', 0}, {'1', 0}, {'2', 0}, {'3', 0}, {'4', 0},
-  {'5', 0}, {'6', 0}, {'7', 0}, {'8', 0}, {'9', 0},
-};
+// Various routines to encode integers to strings.
+
+// We split data encodings into groups of 2 digits, 4 digits, and 8 digits, as
+// it's easier to combine powers of two into scalar arithmetic.
+
+// The previous implementation used a lookup table of 200 bytes for every 2
+// bytes and was memory bound: any L1 cache miss would result in a much slower
+// result. When benchmarking with a cache eviction rate of several percent,
+// this implementation proved to be better.
+
+// These constants represent '00', '0000' and '00000000' as ascii strings in
+// integers. We can add these numbers to bytes that hold digit values 0 to 9,
+// since 'i' = '0' + i for 0 <= i <= 9.
+constexpr uint32_t kTwoZeroBytes = 0x0101 * '0';
+constexpr uint64_t kFourZeroBytes = 0x01010101 * '0';
+constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0';
+
+// * 103 / 1024 is a division by 10 for values from 0 to 99. It also divides a
+// packed structure [k takes 2 bytes][m takes 2 bytes]: multiplying by 103 and
+// dividing by 1024 yields [k / 10][m / 10], which allows parallel division.
+constexpr uint64_t kDivisionBy10Mul = 103u;
+constexpr uint64_t kDivisionBy10Div = 1 << 10;
+
+// * 10486 / 1048576 is a division by 100 for values from 0 to 9999.
+constexpr uint64_t kDivisionBy100Mul = 10486u;
+constexpr uint64_t kDivisionBy100Div = 1 << 20;
+
+// Encode functions write the ASCII output of input `n` to `out_str`.
+inline char* EncodeHundred(uint32_t n, char* out_str) {
+  int num_digits = static_cast<int>(n - 10) >> 8;
+  uint32_t base = kTwoZeroBytes;
+  uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
+  uint32_t mod10 = n - 10u * div10;
+  base += div10 + (mod10 << 8);
+  base >>= num_digits & 8;
+  little_endian::Store16(out_str, static_cast<uint16_t>(base));
+  return out_str + 2 + num_digits;
+}
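// (Editorial worked example, not part of the diff.) The multiply-and-shift
// division above is exact over the stated range: for n = 57, 57 * 103 = 5871
// and 5871 / 1024 = 5 = 57 / 10; for n = 99, 99 * 103 = 10197 and
// 10197 / 1024 = 9. The /100 constant behaves the same way: 9999 * 10486 =
// 104849514 and 104849514 / 2^20 = 99 = 9999 / 100. Because each packed value
// occupies only the low bits of its 16-bit lane, a single multiply divides
// both lanes at once.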
+
+inline char* EncodeTenThousand(uint32_t n, char* out_str) {
+  // We split the lower 2 digits and upper 2 digits of n into consecutive
+  // 2-byte blocks: 123 -> [\0\1][\0\23]. We divide both blocks by 10
+  // (it's 1 division + zeroing upper bits) and compute the modulo 10 as well
+  // "in parallel". Then we combine both results to get the ASCII digits,
+  // strip trailing zeros, add ASCII '0000' and return.
+  uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div;
+  uint32_t mod100 = n - 100ull * div100;
+  uint32_t hundreds = (mod100 << 16) + div100;
+  uint32_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
+  tens &= (0xFull << 16) | 0xFull;
+  tens += (hundreds - 10ull * tens) << 8;
+  ABSL_ASSUME(tens != 0);
+  // The result can contain trailing zero bits that we need to strip, up to the
+  // first significant byte of the final representation. For example, for
+  // n = 123, `tens` has the byte representation \0\1\2\3. We take countr_zero
+  // and round it down with `& -8` to a multiple of 8, so that we strip whole
+  // zero bytes rather than individual zero bits.
+  // 0 minus 8 to make MSVC happy.
+  uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(tens)) & (0 - 8ull);
+  tens += kFourZeroBytes;
+  tens >>= zeroes;
+  little_endian::Store32(out_str, tens);
+  return out_str + sizeof(tens) - zeroes / 8;
+}
+
+// Prepare functions return an integer that should be written to out_str
+// (but possibly include trailing zeros).
+// For hi < 10000, lo < 10000 returns uint64_t as encoded in ASCII with
+// possibly trailing zeroes of the number hi * 10000 + lo.
+inline uint64_t PrepareTenThousands(uint64_t hi, uint64_t lo) {
+  uint64_t merged = hi | (lo << 32);
+  uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) &
+                    ((0x7Full << 32) | 0x7Full);
+  uint64_t mod100 = merged - 100ull * div100;
+  uint64_t hundreds = (mod100 << 16) + div100;
+  uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
+  tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull;
+  tens += (hundreds - 10ull * tens) << 8;
+  return tens;
+}
+
+inline char* EncodeFullU32(uint32_t n, char* out_str) {
+  if (n < 100'000'000) {
+    uint64_t bottom = PrepareTenThousands(n / 10000, n % 10000);
+    ABSL_ASSUME(bottom != 0);
+    // 0 minus 8 to make MSVC happy.
+    uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(bottom))
+        & (0 - 8ull);
+    uint64_t bottom_res = bottom + kEightZeroBytes;
+    bottom_res >>= zeroes;
+    little_endian::Store64(out_str, bottom_res);
+    return out_str + sizeof(bottom) - zeroes / 8;
+  }
+  uint32_t top = n / 100'000'000;
+  n %= 100'000'000;
+  uint64_t bottom = PrepareTenThousands(n / 10000, n % 10000);
+  uint64_t bottom_res = bottom + kEightZeroBytes;
+  out_str = EncodeHundred(top, out_str);
+  little_endian::Store64(out_str, bottom_res);
+  return out_str + sizeof(bottom);
+}
 
 }  // namespace
 
-char* numbers_internal::FastIntToBuffer(uint32_t i, char* buffer) {
-  uint32_t digits;
-  // The idea of this implementation is to trim the number of divides to as few
-  // as possible, and also reducing memory stores and branches, by going in
-  // steps of two digits at a time rather than one whenever possible.
-  // The huge-number case is first, in the hopes that the compiler will output
-  // that case in one branch-free block of code, and only output conditional
-  // branches into it from below.
-  if (i >= 1000000000) {     // >= 1,000,000,000
-    digits = i / 100000000;  //      100,000,000
-    i -= digits * 100000000;
-    PutTwoDigits(digits, buffer);
-    buffer += 2;
-  lt100_000_000:
-    digits = i / 1000000;  // 1,000,000
-    i -= digits * 1000000;
-    PutTwoDigits(digits, buffer);
-    buffer += 2;
-  lt1_000_000:
-    digits = i / 10000;  // 10,000
-    i -= digits * 10000;
-    PutTwoDigits(digits, buffer);
-    buffer += 2;
-  lt10_000:
-    digits = i / 100;
-    i -= digits * 100;
-    PutTwoDigits(digits, buffer);
-    buffer += 2;
- lt100:
-    digits = i;
-    PutTwoDigits(digits, buffer);
-    buffer += 2;
-    *buffer = 0;
-    return buffer;
-  }
+void numbers_internal::PutTwoDigits(uint32_t i, char* buf) {
+  assert(i < 100);
+  uint32_t base = kTwoZeroBytes;
+  uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
+  uint32_t mod10 = i - 10u * div10;
+  base += div10 + (mod10 << 8);
+  little_endian::Store16(buf, static_cast<uint16_t>(base));
+}
 
-  if (i < 100) {
-    digits = i;
-    if (i >= 10) goto lt100;
-    memcpy(buffer, one_ASCII_final_digits[i], 2);
-    return buffer + 1;
+char* numbers_internal::FastIntToBuffer(uint32_t n, char* out_str) {
+  if (n < 100) {
+    out_str = EncodeHundred(n, out_str);
+    goto set_last_zero;
   }
-  if (i < 10000) {  //    10,000
-    if (i >= 1000) goto lt10_000;
-    digits = i / 100;
-    i -= digits * 100;
-    *buffer++ = '0' + digits;
-    goto lt100;
+  if (n < 10000) {
+    out_str = EncodeTenThousand(n, out_str);
+    goto set_last_zero;
   }
-  if (i < 1000000) {  //    1,000,000
-    if (i >= 100000) goto lt1_000_000;
-    digits = i / 10000;  //    10,000
-    i -= digits * 10000;
-    *buffer++ = '0' + digits;
-    goto lt10_000;
-  }
-  if (i < 100000000) {  //    100,000,000
-    if (i >= 10000000) goto lt100_000_000;
-    digits = i / 1000000;  //   1,000,000
-    i -= digits * 1000000;
-    *buffer++ = '0' + digits;
-    goto lt1_000_000;
-  }
-  // we already know that i < 1,000,000,000
-  digits = i / 100000000;  //   100,000,000
-  i -= digits * 100000000;
-  *buffer++ = '0' + digits;
-  goto lt100_000_000;
+  out_str = EncodeFullU32(n, out_str);
+set_last_zero:
+  *out_str = '\0';
+  return out_str;
 }
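A minimal usage sketch of the rewritten function (kFastToBufferSize is the existing buffer-size constant declared in numbers.h):

#include <cstdint>

#include "absl/strings/numbers.h"

void Example() {
  char buf[absl::numbers_internal::kFastToBufferSize];
  // Writes "4294967295" and returns a pointer to the terminating '\0'.
  char* end = absl::numbers_internal::FastIntToBuffer(uint32_t{4294967295u}, buf);
  (void)end;
}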
 
 char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
-  uint32_t u = i;
+  uint32_t u = static_cast<uint32_t>(i);
   if (i < 0) {
     *buffer++ = '-';
     // We need to do the negation in modular (i.e., "unsigned")
-    // arithmetic; MSVC++ apprently warns for plain "-u", so
+    // arithmetic; MSVC++ apparently warns for plain "-u", so
     // we write the equivalent expression "0 - u" instead.
     u = 0 - u;
   }
@@ -220,45 +282,44 @@
   uint32_t u32 = static_cast<uint32_t>(i);
   if (u32 == i) return numbers_internal::FastIntToBuffer(u32, buffer);
 
-  // Here we know i has at least 10 decimal digits.
-  uint64_t top_1to11 = i / 1000000000;
-  u32 = static_cast<uint32_t>(i - top_1to11 * 1000000000);
-  uint32_t top_1to11_32 = static_cast<uint32_t>(top_1to11);
-
-  if (top_1to11_32 == top_1to11) {
-    buffer = numbers_internal::FastIntToBuffer(top_1to11_32, buffer);
-  } else {
-    // top_1to11 has more than 32 bits too; print it in two steps.
-    uint32_t top_8to9 = static_cast<uint32_t>(top_1to11 / 100);
-    uint32_t mid_2 = static_cast<uint32_t>(top_1to11 - top_8to9 * 100);
-    buffer = numbers_internal::FastIntToBuffer(top_8to9, buffer);
-    PutTwoDigits(mid_2, buffer);
-    buffer += 2;
+  // Here 10**9 < 2**32 <= i. If additionally i < 10**10, we can emit the
+  // digits as 2 + 8.
+  uint64_t div08 = i / 100'000'000ull;
+  uint64_t mod08 = i % 100'000'000ull;
+  uint64_t mod_result =
+      PrepareTenThousands(mod08 / 10000, mod08 % 10000) + kEightZeroBytes;
+  if (i < 10'000'000'000ull) {
+    buffer = EncodeHundred(static_cast<uint32_t>(div08), buffer);
+    little_endian::Store64(buffer, mod_result);
+    buffer += 8;
+    goto set_last_zero;
   }
 
-  // We have only 9 digits now, again the maximum uint32_t can handle fully.
-  uint32_t digits = u32 / 10000000;  // 10,000,000
-  u32 -= digits * 10000000;
-  PutTwoDigits(digits, buffer);
-  buffer += 2;
-  digits = u32 / 100000;  // 100,000
-  u32 -= digits * 100000;
-  PutTwoDigits(digits, buffer);
-  buffer += 2;
-  digits = u32 / 1000;  // 1,000
-  u32 -= digits * 1000;
-  PutTwoDigits(digits, buffer);
-  buffer += 2;
-  digits = u32 / 10;
-  u32 -= digits * 10;
-  PutTwoDigits(digits, buffer);
-  buffer += 2;
-  memcpy(buffer, one_ASCII_final_digits[u32], 2);
-  return buffer + 1;
+  // i < 10**16, in this case 8+8
+  if (i < 10'000'000'000'000'000ull) {
+    buffer = EncodeFullU32(static_cast<uint32_t>(div08), buffer);
+    little_endian::Store64(buffer, mod_result);
+    buffer += 8;
+    goto set_last_zero;
+  } else {
+    // 4 + 8 + 8
+    uint64_t div016 = i / 10'000'000'000'000'000ull;
+    buffer = EncodeTenThousand(static_cast<uint32_t>(div016), buffer);
+    uint64_t mid_result = div08 - div016 * 100'000'000ull;
+    mid_result = PrepareTenThousands(mid_result / 10000, mid_result % 10000) +
+                 kEightZeroBytes;
+    little_endian::Store64(buffer, mid_result);
+    buffer += 8;
+    little_endian::Store64(buffer, mod_result);
+    buffer += 8;
+    goto set_last_zero;
+  }
+set_last_zero:
+  *buffer = '\0';
+  return buffer;
 }
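The digit grouping used by the branches above, checked by hand on a 20-digit input (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t i = 12345678901234567890ull;
  uint64_t div08 = i / 100'000'000ull;              // 123456789012
  uint64_t mod08 = i % 100'000'000ull;              // 34567890
  uint64_t div016 = i / 10'000'000'000'000'000ull;  // 1234
  uint64_t mid = div08 - div016 * 100'000'000ull;   // 56789012
  assert(div016 == 1234 && mid == 56789012 && mod08 == 34567890);
  // Emitting "1234", then "56789012", then "34567890" reproduces the input,
  // which is exactly the 4 + 8 + 8 path above.
}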
 
 char* numbers_internal::FastIntToBuffer(int64_t i, char* buffer) {
-  uint64_t u = i;
+  uint64_t u = static_cast<uint64_t>(i);
   if (i < 0) {
     *buffer++ = '-';
     u = 0 - u;
@@ -303,7 +364,7 @@
   uint64_t bits128_up = (bits96_127 >> 32) + (bits64_127 < bits64_95);
   if (bits128_up == 0) return {bits64_127, bits0_63};
 
-  int shift = 64 - base_internal::CountLeadingZeros64(bits128_up);
+  auto shift = static_cast<unsigned>(bit_width(bits128_up));
   uint64_t lo = (bits0_63 >> shift) + (bits64_127 << (64 - shift));
   uint64_t hi = (bits64_127 >> shift) + (bits128_up << (64 - shift));
   return {hi, lo};
@@ -319,7 +380,7 @@
     result = Mul32(result, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5);
     expfive -= 13;
   }
-  constexpr int powers_of_five[13] = {
+  constexpr uint32_t powers_of_five[13] = {
       1,
       5,
       5 * 5,
@@ -334,7 +395,7 @@
       5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
       5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5};
   result = Mul32(result, powers_of_five[expfive & 15]);
-  int shift = base_internal::CountLeadingZeros64(result.first);
+  int shift = countl_zero(result.first);
   if (shift != 0) {
     result.first = (result.first << shift) + (result.second >> (64 - shift));
     result.second = (result.second << shift);
@@ -394,14 +455,14 @@
   // we multiply it by 65536 and see if the fractional part is close to 32768.
   // (The number doesn't have to be a power of two, but powers of two are faster)
   uint64_t d64k = d * 65536;
-  int dddddd;  // A 6-digit decimal integer.
+  uint32_t dddddd;  // A 6-digit decimal integer.
   if ((d64k % 65536) == 32767 || (d64k % 65536) == 32768) {
     // OK, it's fairly likely that precision was lost above, which is
     // not a surprise given only 52 mantissa bits are available.  Therefore
     // redo the calculation using 128-bit numbers.  (64 bits are not enough).
 
     // Start out with digits rounded down; maybe add one below.
-    dddddd = static_cast<int>(d64k / 65536);
+    dddddd = static_cast<uint32_t>(d64k / 65536);
 
     // mantissa is a 64-bit integer representing M.mmm... * 2^63.  The actual
     // value we're representing, of course, is M.mmm... * 2^exp2.
@@ -451,7 +512,7 @@
     }
   } else {
     // Here, we are not close to the edge.
-    dddddd = static_cast<int>((d64k + 32768) / 65536);
+    dddddd = static_cast<uint32_t>((d64k + 32768) / 65536);
   }
   if (dddddd == 1000000) {
     dddddd = 100000;
@@ -459,7 +520,7 @@
   }
   exp_dig.exponent = exp;
 
-  int two_digits = dddddd / 10000;
+  uint32_t two_digits = dddddd / 10000;
   dddddd -= two_digits * 10000;
   numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[0]);
 
@@ -489,15 +550,15 @@
     if (std::signbit(d)) *out++ = '-';
     *out++ = '0';
     *out = 0;
-    return out - buffer;
+    return static_cast<size_t>(out - buffer);
   }
   if (d < 0) {
     *out++ = '-';
     d = -d;
   }
-  if (std::isinf(d)) {
+  if (d > std::numeric_limits<double>::max()) {
     strcpy(out, "inf");  // NOLINT(runtime/printf)
-    return out + 3 - buffer;
+    return static_cast<size_t>(out + 3 - buffer);
   }
 
   auto exp_dig = SplitToSix(d);
@@ -509,7 +570,7 @@
     case 5:
       memcpy(out, &digits[0], 6), out += 6;
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
     case 4:
       memcpy(out, &digits[0], 5), out += 5;
       if (digits[5] != '0') {
@@ -517,7 +578,7 @@
         *out++ = digits[5];
       }
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
     case 3:
       memcpy(out, &digits[0], 4), out += 4;
       if ((digits[5] | digits[4]) != '0') {
@@ -526,7 +587,7 @@
         if (digits[5] != '0') *out++ = digits[5];
       }
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
     case 2:
       memcpy(out, &digits[0], 3), out += 3;
       *out++ = '.';
@@ -535,7 +596,7 @@
       while (out[-1] == '0') --out;
       if (out[-1] == '.') --out;
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
     case 1:
       memcpy(out, &digits[0], 2), out += 2;
       *out++ = '.';
@@ -544,7 +605,7 @@
       while (out[-1] == '0') --out;
       if (out[-1] == '.') --out;
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
     case 0:
       memcpy(out, &digits[0], 1), out += 1;
       *out++ = '.';
@@ -553,7 +614,7 @@
       while (out[-1] == '0') --out;
       if (out[-1] == '.') --out;
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
     case -4:
       out[2] = '0';
       ++out;
@@ -572,7 +633,7 @@
       out += 6;
       while (out[-1] == '0') --out;
       *out = 0;
-      return out - buffer;
+      return static_cast<size_t>(out - buffer);
   }
   assert(exp < -4 || exp >= 6);
   out[0] = digits[0];
@@ -591,12 +652,12 @@
   if (exp > 99) {
     int dig1 = exp / 100;
     exp -= dig1 * 100;
-    *out++ = '0' + dig1;
+    *out++ = '0' + static_cast<char>(dig1);
   }
-  PutTwoDigits(exp, out);
+  PutTwoDigits(static_cast<uint32_t>(exp), out);
   out += 2;
   *out = 0;
-  return out - buffer;
+  return static_cast<size_t>(out - buffer);
 }
 
 namespace {
@@ -632,10 +693,12 @@
   int base = *base_ptr;
 
   // Consume whitespace.
-  while (start < end && absl::ascii_isspace(start[0])) {
+  while (start < end &&
+         absl::ascii_isspace(static_cast<unsigned char>(start[0]))) {
     ++start;
   }
-  while (start < end && absl::ascii_isspace(end[-1])) {
+  while (start < end &&
+         absl::ascii_isspace(static_cast<unsigned char>(end[-1]))) {
     --end;
   }
   if (start >= end) {
@@ -684,7 +747,7 @@
   } else {
     return false;
   }
-  *text = absl::string_view(start, end - start);
+  *text = absl::string_view(start, static_cast<size_t>(end - start));
   *base_ptr = base;
   return true;
 }
@@ -736,10 +799,19 @@
         X / 35, X / 36,                                                   \
   }
 
+// This kVmaxOverBase is generated with
+//  for (int base = 2; base < 37; ++base) {
+//    absl::uint128 max = std::numeric_limits<absl::uint128>::max();
+//    auto result = max / base;
+//    std::cout << "    MakeUint128(" << absl::Uint128High64(result) << "u, "
+//              << absl::Uint128Low64(result) << "u),\n";
+//  }
+// See https://godbolt.org/z/aneYsb
+//
 // uint128& operator/=(uint128) is not constexpr, so hardcode the resulting
 // array to avoid a static initializer.
 template <>
-const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
+ABSL_CONST_INIT const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
     0,
     0,
     MakeUint128(9223372036854775807u, 18446744073709551615u),
@@ -779,12 +851,117 @@
     MakeUint128(512409557603043100u, 8198552921648689607u),
 };
 
+// This kVmaxOverBase was generated with
+//   for (int base = 2; base < 37; ++base) {
+//    absl::int128 max = std::numeric_limits<absl::int128>::max();
+//    auto result = max / base;
+//    std::cout << "\tMakeInt128(" << absl::Int128High64(result) << ", "
+//              << absl::Int128Low64(result) << "u),\n";
+//  }
+// See https://godbolt.org/z/7djYWz
+//
+// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
+// to avoid a static initializer.
+template <>
+ABSL_CONST_INIT const int128 LookupTables<int128>::kVmaxOverBase[] = {
+    0,
+    0,
+    MakeInt128(4611686018427387903, 18446744073709551615u),
+    MakeInt128(3074457345618258602, 12297829382473034410u),
+    MakeInt128(2305843009213693951, 18446744073709551615u),
+    MakeInt128(1844674407370955161, 11068046444225730969u),
+    MakeInt128(1537228672809129301, 6148914691236517205u),
+    MakeInt128(1317624576693539401, 2635249153387078802u),
+    MakeInt128(1152921504606846975, 18446744073709551615u),
+    MakeInt128(1024819115206086200, 16397105843297379214u),
+    MakeInt128(922337203685477580, 14757395258967641292u),
+    MakeInt128(838488366986797800, 13415813871788764811u),
+    MakeInt128(768614336404564650, 12297829382473034410u),
+    MakeInt128(709490156681136600, 11351842506898185609u),
+    MakeInt128(658812288346769700, 10540996613548315209u),
+    MakeInt128(614891469123651720, 9838263505978427528u),
+    MakeInt128(576460752303423487, 18446744073709551615u),
+    MakeInt128(542551296285575047, 9765923333140350855u),
+    MakeInt128(512409557603043100, 8198552921648689607u),
+    MakeInt128(485440633518672410, 17475862806672206794u),
+    MakeInt128(461168601842738790, 7378697629483820646u),
+    MakeInt128(439208192231179800, 7027331075698876806u),
+    MakeInt128(419244183493398900, 6707906935894382405u),
+    MakeInt128(401016175515425035, 2406097053092550210u),
+    MakeInt128(384307168202282325, 6148914691236517205u),
+    MakeInt128(368934881474191032, 5902958103587056517u),
+    MakeInt128(354745078340568300, 5675921253449092804u),
+    MakeInt128(341606371735362066, 17763531330238827482u),
+    MakeInt128(329406144173384850, 5270498306774157604u),
+    MakeInt128(318047311615681924, 7633135478776366185u),
+    MakeInt128(307445734561825860, 4919131752989213764u),
+    MakeInt128(297528130221121800, 4760450083537948804u),
+    MakeInt128(288230376151711743, 18446744073709551615u),
+    MakeInt128(279496122328932600, 4471937957262921603u),
+    MakeInt128(271275648142787523, 14106333703424951235u),
+    MakeInt128(263524915338707880, 4216398645419326083u),
+    MakeInt128(256204778801521550, 4099276460824344803u),
+};
+
+// This kVminOverBase was generated with
+//  for (int base = 2; base < 37; ++base) {
+//    absl::int128 min = std::numeric_limits<absl::int128>::min();
+//    auto result = min / base;
+//    std::cout << "\tMakeInt128(" << absl::Int128High64(result) << ", "
+//              << absl::Int128Low64(result) << "u),\n";
+//  }
+//
+// See https://godbolt.org/z/7djYWz
+//
+// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
+// to avoid a static initializer.
+template <>
+ABSL_CONST_INIT const int128 LookupTables<int128>::kVminOverBase[] = {
+    0,
+    0,
+    MakeInt128(-4611686018427387904, 0u),
+    MakeInt128(-3074457345618258603, 6148914691236517206u),
+    MakeInt128(-2305843009213693952, 0u),
+    MakeInt128(-1844674407370955162, 7378697629483820647u),
+    MakeInt128(-1537228672809129302, 12297829382473034411u),
+    MakeInt128(-1317624576693539402, 15811494920322472814u),
+    MakeInt128(-1152921504606846976, 0u),
+    MakeInt128(-1024819115206086201, 2049638230412172402u),
+    MakeInt128(-922337203685477581, 3689348814741910324u),
+    MakeInt128(-838488366986797801, 5030930201920786805u),
+    MakeInt128(-768614336404564651, 6148914691236517206u),
+    MakeInt128(-709490156681136601, 7094901566811366007u),
+    MakeInt128(-658812288346769701, 7905747460161236407u),
+    MakeInt128(-614891469123651721, 8608480567731124088u),
+    MakeInt128(-576460752303423488, 0u),
+    MakeInt128(-542551296285575048, 8680820740569200761u),
+    MakeInt128(-512409557603043101, 10248191152060862009u),
+    MakeInt128(-485440633518672411, 970881267037344822u),
+    MakeInt128(-461168601842738791, 11068046444225730970u),
+    MakeInt128(-439208192231179801, 11419412998010674810u),
+    MakeInt128(-419244183493398901, 11738837137815169211u),
+    MakeInt128(-401016175515425036, 16040647020617001406u),
+    MakeInt128(-384307168202282326, 12297829382473034411u),
+    MakeInt128(-368934881474191033, 12543785970122495099u),
+    MakeInt128(-354745078340568301, 12770822820260458812u),
+    MakeInt128(-341606371735362067, 683212743470724134u),
+    MakeInt128(-329406144173384851, 13176245766935394012u),
+    MakeInt128(-318047311615681925, 10813608594933185431u),
+    MakeInt128(-307445734561825861, 13527612320720337852u),
+    MakeInt128(-297528130221121801, 13686293990171602812u),
+    MakeInt128(-288230376151711744, 0u),
+    MakeInt128(-279496122328932601, 13974806116446630013u),
+    MakeInt128(-271275648142787524, 4340410370284600381u),
+    MakeInt128(-263524915338707881, 14230345428290225533u),
+    MakeInt128(-256204778801521551, 14347467612885206813u),
+};
+
 template <typename IntType>
-const IntType LookupTables<IntType>::kVmaxOverBase[] =
+ABSL_CONST_INIT const IntType LookupTables<IntType>::kVmaxOverBase[] =
     X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::max());
 
 template <typename IntType>
-const IntType LookupTables<IntType>::kVminOverBase[] =
+ABSL_CONST_INIT const IntType LookupTables<IntType>::kVminOverBase[] =
     X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::min());
 
 #undef X_OVER_BASE_INITIALIZER
@@ -796,17 +973,18 @@
   const IntType vmax = std::numeric_limits<IntType>::max();
   assert(vmax > 0);
   assert(base >= 0);
-  assert(vmax >= static_cast<IntType>(base));
+  const IntType base_inttype = static_cast<IntType>(base);
+  assert(vmax >= base_inttype);
   const IntType vmax_over_base = LookupTables<IntType>::kVmaxOverBase[base];
   assert(base < 2 ||
-         std::numeric_limits<IntType>::max() / base == vmax_over_base);
+         std::numeric_limits<IntType>::max() / base_inttype == vmax_over_base);
   const char* start = text.data();
   const char* end = start + text.size();
   // loop over digits
   for (; start < end; ++start) {
     unsigned char c = static_cast<unsigned char>(start[0]);
-    int digit = kAsciiToInt[c];
-    if (digit >= base) {
+    IntType digit = static_cast<IntType>(kAsciiToInt[c]);
+    if (digit >= base_inttype) {
       *value_p = value;
       return false;
     }
@@ -814,7 +992,7 @@
       *value_p = vmax;
       return false;
     }
-    value *= base;
+    value *= base_inttype;
     if (value > vmax - digit) {
       *value_p = vmax;
       return false;
@@ -921,25 +1099,6 @@
     "e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
     "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
 
-ABSL_CONST_INIT ABSL_DLL const char two_ASCII_digits[100][2] = {
-    {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'},
-    {'0', '6'}, {'0', '7'}, {'0', '8'}, {'0', '9'}, {'1', '0'}, {'1', '1'},
-    {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'},
-    {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'},
-    {'2', '4'}, {'2', '5'}, {'2', '6'}, {'2', '7'}, {'2', '8'}, {'2', '9'},
-    {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'},
-    {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'},
-    {'4', '2'}, {'4', '3'}, {'4', '4'}, {'4', '5'}, {'4', '6'}, {'4', '7'},
-    {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'},
-    {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'},
-    {'6', '0'}, {'6', '1'}, {'6', '2'}, {'6', '3'}, {'6', '4'}, {'6', '5'},
-    {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'},
-    {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'},
-    {'7', '8'}, {'7', '9'}, {'8', '0'}, {'8', '1'}, {'8', '2'}, {'8', '3'},
-    {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'},
-    {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'},
-    {'9', '6'}, {'9', '7'}, {'9', '8'}, {'9', '9'}};
-
 bool safe_strto32_base(absl::string_view text, int32_t* value, int base) {
   return safe_int_internal<int32_t>(text, value, base);
 }
@@ -948,6 +1107,10 @@
   return safe_int_internal<int64_t>(text, value, base);
 }
 
+bool safe_strto128_base(absl::string_view text, int128* value, int base) {
+  return safe_int_internal<absl::int128>(text, value, base);
+}
+
 bool safe_strtou32_base(absl::string_view text, uint32_t* value, int base) {
   return safe_uint_internal<uint32_t>(text, value, base);
 }
diff --git a/abseil-cpp/absl/strings/numbers.h b/abseil-cpp/absl/strings/numbers.h
index d872cca..d7630ce 100644
--- a/abseil-cpp/absl/strings/numbers.h
+++ b/abseil-cpp/absl/strings/numbers.h
@@ -1,4 +1,3 @@
-//
 // Copyright 2017 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,8 +23,12 @@
 #ifndef ABSL_STRINGS_NUMBERS_H_
 #define ABSL_STRINGS_NUMBERS_H_
 
-#ifdef __SSE4_2__
-#include <x86intrin.h>
+#ifdef __SSSE3__
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
 #endif
 
 #include <cstddef>
@@ -37,17 +40,10 @@
 #include <type_traits>
 
 #include "absl/base/config.h"
-#include "absl/base/internal/bits.h"
-#ifdef __SSE4_2__
-// TODO(jorg): Remove this when we figure out the right way
-// to swap bytes on SSE 4.2 that works with the compilers
-// we claim to support.  Also, add tests for the compiler
-// that doesn't support the Intel _bswap64 intrinsic but
-// does support all the SSE 4.2 intrinsics
 #include "absl/base/internal/endian.h"
-#endif
 #include "absl/base/macros.h"
 #include "absl/base/port.h"
+#include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
 #include "absl/strings/string_view.h"
 
@@ -97,6 +93,25 @@
 // unspecified state.
 ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str, bool* out);
 
+// SimpleHexAtoi()
+//
+// Converts a hexadecimal string (optionally followed or preceded by ASCII
+// whitespace) to an integer, returning `true` if successful. Only valid base-16
+// hexadecimal integers whose value falls within the range of the integer type
+// (optionally preceded by a `+` or `-`) can be converted. A valid hexadecimal
+// value may include both upper and lowercase character symbols, and may
+// optionally include a leading "0x" (or "0X") number prefix, which is ignored
+// by this function. If any errors are encountered, this function returns
+// `false`, leaving `out` in an unspecified state.
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str, int_type* out);
+
+// Overloads of SimpleHexAtoi() for 128 bit integers.
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str,
+                                               absl::int128* out);
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str,
+                                               absl::uint128* out);
+
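An illustrative usage sketch of the new SimpleHexAtoi() overloads (the values here are example inputs only):

#include "absl/strings/numbers.h"

int HexExample() {
  int value = 0;
  // Accepts an optional 0x/0X prefix and surrounding ASCII whitespace.
  bool ok = absl::SimpleHexAtoi("  0x1A3F  ", &value);
  return ok ? value : -1;  // returns 6719
}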
 ABSL_NAMESPACE_END
 }  // namespace absl
 
@@ -110,8 +125,6 @@
 ABSL_DLL extern const char kHexChar[17];  // 0123456789abcdef
 ABSL_DLL extern const char
     kHexTable[513];  // 000102030405060708090a0b0c0d0e0f1011...
-ABSL_DLL extern const char
-    two_ASCII_digits[100][2];  // 00, 01, 02, 03...
 
 // Writes a two-character representation of 'i' to 'buf'. 'i' must be in the
 // range 0 <= i < 100, and buf must have space for two characters. Example:
@@ -119,14 +132,14 @@
 //   PutTwoDigits(42, buf);
 //   // buf[0] == '4'
 //   // buf[1] == '2'
-inline void PutTwoDigits(size_t i, char* buf) {
-  assert(i < 100);
-  memcpy(buf, two_ASCII_digits[i], 2);
-}
+void PutTwoDigits(uint32_t i, char* buf);
 
 // safe_strto?() functions for implementing SimpleAtoi()
+
 bool safe_strto32_base(absl::string_view text, int32_t* value, int base);
 bool safe_strto64_base(absl::string_view text, int64_t* value, int base);
+bool safe_strto128_base(absl::string_view text, absl::int128* value,
+                         int base);
 bool safe_strtou32_base(absl::string_view text, uint32_t* value, int base);
 bool safe_strtou64_base(absl::string_view text, uint64_t* value, int base);
 bool safe_strtou128_base(absl::string_view text, absl::uint128* value,
@@ -160,16 +173,19 @@
   // TODO(jorg): This signed-ness check is used because it works correctly
   // with enums, and it also serves to check that int_type is not a pointer.
   // If one day something like std::is_signed<enum E> works, switch to it.
-  if (static_cast<int_type>(1) - 2 < 0) {  // Signed
-    if (sizeof(i) > 32 / 8) {           // 33-bit to 64-bit
+  // These conditions are constexpr bools to suppress MSVC warning C4127.
+  constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+  constexpr bool kUse64Bit = sizeof(i) > 32 / 8;
+  if (kIsSigned) {
+    if (kUse64Bit) {
       return FastIntToBuffer(static_cast<int64_t>(i), buffer);
-    } else {  // 32-bit or less
+    } else {
       return FastIntToBuffer(static_cast<int32_t>(i), buffer);
     }
-  } else {                     // Unsigned
-    if (sizeof(i) > 32 / 8) {  // 33-bit to 64-bit
+  } else {
+    if (kUse64Bit) {
       return FastIntToBuffer(static_cast<uint64_t>(i), buffer);
-    } else {  // 32-bit or less
+    } else {
       return FastIntToBuffer(static_cast<uint32_t>(i), buffer);
     }
   }
@@ -188,22 +204,25 @@
   // TODO(jorg): This signed-ness check is used because it works correctly
   // with enums, and it also serves to check that int_type is not a pointer.
   // If one day something like std::is_signed<enum E> works, switch to it.
-  if (static_cast<int_type>(1) - 2 < 0) {  // Signed
-    if (sizeof(*out) == 64 / 8) {       // 64-bit
+  // These conditions are constexpr bools to suppress MSVC warning C4127.
+  constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+  constexpr bool kUse64Bit = sizeof(*out) == 64 / 8;
+  if (kIsSigned) {
+    if (kUse64Bit) {
       int64_t val;
       parsed = numbers_internal::safe_strto64_base(s, &val, base);
       *out = static_cast<int_type>(val);
-    } else {  // 32-bit
+    } else {
       int32_t val;
       parsed = numbers_internal::safe_strto32_base(s, &val, base);
       *out = static_cast<int_type>(val);
     }
-  } else {                         // Unsigned
-    if (sizeof(*out) == 64 / 8) {  // 64-bit
+  } else {
+    if (kUse64Bit) {
       uint64_t val;
       parsed = numbers_internal::safe_strtou64_base(s, &val, base);
       *out = static_cast<int_type>(val);
-    } else {  // 32-bit
+    } else {
       uint32_t val;
       parsed = numbers_internal::safe_strtou32_base(s, &val, base);
       *out = static_cast<int_type>(val);
@@ -219,7 +238,7 @@
 // Returns the number of non-pad digits of the output (it can never be zero
 // since 0 has one digit).
 inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
-#ifdef __SSE4_2__
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
   uint64_t be = absl::big_endian::FromHost64(val);
   const auto kNibbleMask = _mm_set1_epi8(0xf);
   const auto kHexDigits = _mm_setr_epi8('0', '1', '2', '3', '4', '5', '6', '7',
@@ -238,28 +257,41 @@
   }
 #endif
   // | 0x1 so that even 0 has 1 digit.
-  return 16 - absl::base_internal::CountLeadingZeros64(val | 0x1) / 4;
+  return 16 - static_cast<size_t>(countl_zero(val | 0x1) / 4);
 }
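The digit-count expression above, checked on two sample values (illustrative only): each hex digit covers 4 bits, so the count of significant digits is 16 minus the number of leading zero nibbles, and OR-ing with 0x1 makes val == 0 report one digit.

#include <cassert>
#include <cstdint>

#include "absl/numeric/bits.h"

int main() {
  uint64_t val = 0xABC;
  assert(16 - absl::countl_zero(val | 0x1) / 4 == 3);
  assert(16 - absl::countl_zero(uint64_t{0} | 0x1) / 4 == 1);
}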
 
 }  // namespace numbers_internal
 
-// SimpleAtoi()
-//
-// Converts a string to an integer, using `safe_strto?()` functions for actual
-// parsing, returning `true` if successful. The `safe_strto?()` functions apply
-// strict checking; the string must be a base-10 integer, optionally followed or
-// preceded by ASCII whitespace, with a value in the range of the corresponding
-// integer type.
 template <typename int_type>
 ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str, int_type* out) {
   return numbers_internal::safe_strtoi_base(str, out, 10);
 }
 
 ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
+                                            absl::int128* out) {
+  return numbers_internal::safe_strto128_base(str, out, 10);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
                                             absl::uint128* out) {
   return numbers_internal::safe_strtou128_base(str, out, 10);
 }
 
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str, int_type* out) {
+  return numbers_internal::safe_strtoi_base(str, out, 16);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str,
+                                               absl::int128* out) {
+  return numbers_internal::safe_strto128_base(str, out, 16);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str,
+                                               absl::uint128* out) {
+  return numbers_internal::safe_strtou128_base(str, out, 16);
+}
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
diff --git a/abseil-cpp/absl/strings/numbers_test.cc b/abseil-cpp/absl/strings/numbers_test.cc
index c2f03b6..2864bda 100644
--- a/abseil-cpp/absl/strings/numbers_test.cc
+++ b/abseil-cpp/absl/strings/numbers_test.cc
@@ -19,6 +19,7 @@
 #include <sys/types.h>
 
 #include <cfenv>  // NOLINT(build/c++11)
+#include <cfloat>
 #include <cinttypes>
 #include <climits>
 #include <cmath>
@@ -36,7 +37,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/random/distributions.h"
 #include "absl/random/random.h"
 #include "absl/strings/internal/numbers_test_common.h"
@@ -46,6 +47,8 @@
 
 namespace {
 
+using absl::SimpleAtoi;
+using absl::SimpleHexAtoi;
 using absl::numbers_internal::kSixDigitsToBufferSize;
 using absl::numbers_internal::safe_strto32_base;
 using absl::numbers_internal::safe_strto64_base;
@@ -55,7 +58,6 @@
 using absl::strings_internal::Itoa;
 using absl::strings_internal::strtouint32_test_cases;
 using absl::strings_internal::strtouint64_test_cases;
-using absl::SimpleAtoi;
 using testing::Eq;
 using testing::MatchesRegex;
 
@@ -251,7 +253,7 @@
 template <typename int_type, typename in_val_type>
 void VerifySimpleAtoiGood(in_val_type in_value, int_type exp_value) {
   std::string s;
-  // uint128 can be streamed but not StrCat'd
+  // (u)int128 can be streamed but not StrCat'd.
   absl::strings_internal::OStringStream(&s) << in_value;
   int_type x = static_cast<int_type>(~exp_value);
   EXPECT_TRUE(SimpleAtoi(s, &x))
@@ -264,7 +266,9 @@
 
 template <typename int_type, typename in_val_type>
 void VerifySimpleAtoiBad(in_val_type in_value) {
-  std::string s = absl::StrCat(in_value);
+  std::string s;
+  // (u)int128 can be streamed but not StrCat'd.
+  absl::strings_internal::OStringStream(&s) << in_value;
   int_type x;
   EXPECT_FALSE(SimpleAtoi(s, &x));
   EXPECT_FALSE(SimpleAtoi(s.c_str(), &x));
@@ -347,22 +351,269 @@
       std::numeric_limits<absl::uint128>::max(),
       std::numeric_limits<absl::uint128>::max());
 
+  // SimpleAtoi(absl::string_view, absl::int128)
+  VerifySimpleAtoiGood<absl::int128>(0, 0);
+  VerifySimpleAtoiGood<absl::int128>(42, 42);
+  VerifySimpleAtoiGood<absl::int128>(-42, -42);
+
+  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int32_t>::min(),
+                                      std::numeric_limits<int32_t>::min());
+  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int32_t>::max(),
+                                      std::numeric_limits<int32_t>::max());
+  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<uint32_t>::max(),
+                                      std::numeric_limits<uint32_t>::max());
+  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int64_t>::min(),
+                                      std::numeric_limits<int64_t>::min());
+  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int64_t>::max(),
+                                      std::numeric_limits<int64_t>::max());
+  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<uint64_t>::max(),
+                                      std::numeric_limits<uint64_t>::max());
+  VerifySimpleAtoiGood<absl::int128>(
+      std::numeric_limits<absl::int128>::min(),
+      std::numeric_limits<absl::int128>::min());
+  VerifySimpleAtoiGood<absl::int128>(
+      std::numeric_limits<absl::int128>::max(),
+      std::numeric_limits<absl::int128>::max());
+  VerifySimpleAtoiBad<absl::int128>(std::numeric_limits<absl::uint128>::max());
+
   // Some other types
   VerifySimpleAtoiGood<int>(-42, -42);
   VerifySimpleAtoiGood<int32_t>(-42, -42);
   VerifySimpleAtoiGood<uint32_t>(42, 42);
   VerifySimpleAtoiGood<unsigned int>(42, 42);
   VerifySimpleAtoiGood<int64_t>(-42, -42);
-  VerifySimpleAtoiGood<long>(-42, -42);  // NOLINT(runtime/int)
+  VerifySimpleAtoiGood<long>(-42, -42);  // NOLINT: runtime-int
   VerifySimpleAtoiGood<uint64_t>(42, 42);
   VerifySimpleAtoiGood<size_t>(42, 42);
   VerifySimpleAtoiGood<std::string::size_type>(42, 42);
 }
 
 TEST(NumbersTest, Atod) {
+  // DBL_TRUE_MIN and FLT_TRUE_MIN were not mandated in <cfloat> before C++17.
+#if !defined(DBL_TRUE_MIN)
+  static constexpr double DBL_TRUE_MIN =
+      4.940656458412465441765687928682213723650598026143247644255856825e-324;
+#endif
+#if !defined(FLT_TRUE_MIN)
+  static constexpr float FLT_TRUE_MIN =
+      1.401298464324817070923729583289916131280261941876515771757068284e-45f;
+#endif
+
   double d;
-  EXPECT_TRUE(absl::SimpleAtod("nan", &d));
+  float f;
+
+  // NaN can be spelled in multiple ways.
+  EXPECT_TRUE(absl::SimpleAtod("NaN", &d));
   EXPECT_TRUE(std::isnan(d));
+  EXPECT_TRUE(absl::SimpleAtod("nAN", &d));
+  EXPECT_TRUE(std::isnan(d));
+  EXPECT_TRUE(absl::SimpleAtod("-nan", &d));
+  EXPECT_TRUE(std::isnan(d));
+
+  // Likewise for Infinity.
+  EXPECT_TRUE(absl::SimpleAtod("inf", &d));
+  EXPECT_TRUE(std::isinf(d) && (d > 0));
+  EXPECT_TRUE(absl::SimpleAtod("+Infinity", &d));
+  EXPECT_TRUE(std::isinf(d) && (d > 0));
+  EXPECT_TRUE(absl::SimpleAtod("-INF", &d));
+  EXPECT_TRUE(std::isinf(d) && (d < 0));
+
+  // Parse DBL_MAX. Parsing something more than twice as big should also
+  // produce infinity.
+  EXPECT_TRUE(absl::SimpleAtod("1.7976931348623157e+308", &d));
+  EXPECT_EQ(d, 1.7976931348623157e+308);
+  EXPECT_TRUE(absl::SimpleAtod("5e308", &d));
+  EXPECT_TRUE(std::isinf(d) && (d > 0));
+  // Ditto, but for FLT_MAX.
+  EXPECT_TRUE(absl::SimpleAtof("3.4028234663852886e+38", &f));
+  EXPECT_EQ(f, 3.4028234663852886e+38f);
+  EXPECT_TRUE(absl::SimpleAtof("7e38", &f));
+  EXPECT_TRUE(std::isinf(f) && (f > 0));
+
+  // Parse the largest N such that parsing 1eN produces a finite value and the
+  // smallest M = N + 1 such that parsing 1eM produces infinity.
+  //
+  // The 309 exponent (and 39) confirms the "definition of
+  // kEiselLemireMaxExclExp10" comment in charconv.cc.
+  EXPECT_TRUE(absl::SimpleAtod("1e308", &d));
+  EXPECT_EQ(d, 1e308);
+  EXPECT_FALSE(std::isinf(d));
+  EXPECT_TRUE(absl::SimpleAtod("1e309", &d));
+  EXPECT_TRUE(std::isinf(d));
+  // Ditto, but for Atof instead of Atod.
+  EXPECT_TRUE(absl::SimpleAtof("1e38", &f));
+  EXPECT_EQ(f, 1e38f);
+  EXPECT_FALSE(std::isinf(f));
+  EXPECT_TRUE(absl::SimpleAtof("1e39", &f));
+  EXPECT_TRUE(std::isinf(f));
+
+  // Parse the largest N such that parsing 9.999999999999999999eN, with 19
+  // nines, produces a finite value.
+  //
+  // 9999999999999999999, with 19 nines but no decimal point, is the largest
+  // "repeated nines" integer that fits in a uint64_t.
+  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e307", &d));
+  EXPECT_EQ(d, 9.999999999999999999e307);
+  EXPECT_FALSE(std::isinf(d));
+  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e308", &d));
+  EXPECT_TRUE(std::isinf(d));
+  // Ditto, but for Atof instead of Atod.
+  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e37", &f));
+  EXPECT_EQ(f, 9.999999999999999999e37f);
+  EXPECT_FALSE(std::isinf(f));
+  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e38", &f));
+  EXPECT_TRUE(std::isinf(f));
+
+  // Parse DBL_MIN (normal), DBL_TRUE_MIN (subnormal) and (DBL_TRUE_MIN / 10)
+  // (effectively zero).
+  EXPECT_TRUE(absl::SimpleAtod("2.2250738585072014e-308", &d));
+  EXPECT_EQ(d, 2.2250738585072014e-308);
+  EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-324", &d));
+  EXPECT_EQ(d, 4.9406564584124654e-324);
+  EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-325", &d));
+  EXPECT_EQ(d, 0);
+  // Ditto, but for FLT_MIN, FLT_TRUE_MIN and (FLT_TRUE_MIN / 10).
+  EXPECT_TRUE(absl::SimpleAtof("1.1754943508222875e-38", &f));
+  EXPECT_EQ(f, 1.1754943508222875e-38f);
+  EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-45", &f));
+  EXPECT_EQ(f, 1.4012984643248171e-45f);
+  EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-46", &f));
+  EXPECT_EQ(f, 0);
+
+  // Parse the largest N (the most negative -N) such that parsing 1e-N produces
+  // a normal or subnormal (but still positive) or zero value.
+  EXPECT_TRUE(absl::SimpleAtod("1e-307", &d));
+  EXPECT_EQ(d, 1e-307);
+  EXPECT_GE(d, DBL_MIN);
+  EXPECT_LT(d, DBL_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtod("1e-323", &d));
+  EXPECT_EQ(d, 1e-323);
+  EXPECT_GE(d, DBL_TRUE_MIN);
+  EXPECT_LT(d, DBL_TRUE_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtod("1e-324", &d));
+  EXPECT_EQ(d, 0);
+  // Ditto, but for Atof instead of Atod.
+  EXPECT_TRUE(absl::SimpleAtof("1e-37", &f));
+  EXPECT_EQ(f, 1e-37f);
+  EXPECT_GE(f, FLT_MIN);
+  EXPECT_LT(f, FLT_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtof("1e-45", &f));
+  EXPECT_EQ(f, 1e-45f);
+  EXPECT_GE(f, FLT_TRUE_MIN);
+  EXPECT_LT(f, FLT_TRUE_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtof("1e-46", &f));
+  EXPECT_EQ(f, 0);
+
+  // Parse the largest N (the most negative -N) such that parsing
+  // 9.999999999999999999e-N, with 19 nines, produces a normal or subnormal
+  // (but still positive) or zero value.
+  //
+  // 9999999999999999999, with 19 nines but no decimal point, is the largest
+  // "repeated nines" integer that fits in a uint64_t.
+  //
+  // The -324/-325 exponents (and -46/-47) confirm the "definition of
+  // kEiselLemireMinInclExp10" comment in charconv.cc.
+  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-308", &d));
+  EXPECT_EQ(d, 9.999999999999999999e-308);
+  EXPECT_GE(d, DBL_MIN);
+  EXPECT_LT(d, DBL_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-324", &d));
+  EXPECT_EQ(d, 9.999999999999999999e-324);
+  EXPECT_GE(d, DBL_TRUE_MIN);
+  EXPECT_LT(d, DBL_TRUE_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-325", &d));
+  EXPECT_EQ(d, 0);
+  // Ditto, but for Atof instead of Atod.
+  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-38", &f));
+  EXPECT_EQ(f, 9.999999999999999999e-38f);
+  EXPECT_GE(f, FLT_MIN);
+  EXPECT_LT(f, FLT_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-46", &f));
+  EXPECT_EQ(f, 9.999999999999999999e-46f);
+  EXPECT_GE(f, FLT_TRUE_MIN);
+  EXPECT_LT(f, FLT_TRUE_MIN * 10);
+  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-47", &f));
+  EXPECT_EQ(f, 0);
+
+  // Leading and/or trailing whitespace is OK.
+  EXPECT_TRUE(absl::SimpleAtod("  \t\r\n  2.718", &d));
+  EXPECT_EQ(d, 2.718);
+  EXPECT_TRUE(absl::SimpleAtod("  3.141  ", &d));
+  EXPECT_EQ(d, 3.141);
+
+  // Leading or trailing not-whitespace is not OK.
+  EXPECT_FALSE(absl::SimpleAtod("n 0", &d));
+  EXPECT_FALSE(absl::SimpleAtod("0n ", &d));
+
+  // Multiple leading 0s are OK.
+  EXPECT_TRUE(absl::SimpleAtod("000123", &d));
+  EXPECT_EQ(d, 123);
+  EXPECT_TRUE(absl::SimpleAtod("000.456", &d));
+  EXPECT_EQ(d, 0.456);
+
+  // An absent leading 0 (for a fraction < 1) is OK.
+  EXPECT_TRUE(absl::SimpleAtod(".5", &d));
+  EXPECT_EQ(d, 0.5);
+  EXPECT_TRUE(absl::SimpleAtod("-.707", &d));
+  EXPECT_EQ(d, -0.707);
+
+  // Unary + is OK.
+  EXPECT_TRUE(absl::SimpleAtod("+6.0221408e+23", &d));
+  EXPECT_EQ(d, 6.0221408e+23);
+
+  // Underscores are not OK.
+  EXPECT_FALSE(absl::SimpleAtod("123_456", &d));
+
+  // The decimal separator must be '.' and is never ','.
+  EXPECT_TRUE(absl::SimpleAtod("8.9", &d));
+  EXPECT_FALSE(absl::SimpleAtod("8,9", &d));
+
+  // These examples are called out in the EiselLemire function's comments.
+  EXPECT_TRUE(absl::SimpleAtod("4503599627370497.5", &d));
+  EXPECT_EQ(d, 4503599627370497.5);
+  EXPECT_TRUE(absl::SimpleAtod("1e+23", &d));
+  EXPECT_EQ(d, 1e+23);
+  EXPECT_TRUE(absl::SimpleAtod("9223372036854775807", &d));
+  EXPECT_EQ(d, 9223372036854775807);
+  // Ditto, but for Atof instead of Atod.
+  EXPECT_TRUE(absl::SimpleAtof("0.0625", &f));
+  EXPECT_EQ(f, 0.0625f);
+  EXPECT_TRUE(absl::SimpleAtof("20040229.0", &f));
+  EXPECT_EQ(f, 20040229.0f);
+  EXPECT_TRUE(absl::SimpleAtof("2147483647.0", &f));
+  EXPECT_EQ(f, 2147483647.0f);
+
+  // Some parsing algorithms don't always round correctly (but absl::SimpleAtod
+  // should). This test case comes from
+  // https://github.com/serde-rs/json/issues/707
+  //
+  // See also atod_manual_test.cc for running many more test cases.
+  EXPECT_TRUE(absl::SimpleAtod("122.416294033786585", &d));
+  EXPECT_EQ(d, 122.416294033786585);
+  EXPECT_TRUE(absl::SimpleAtof("122.416294033786585", &f));
+  EXPECT_EQ(f, 122.416294033786585f);
+}
+
+TEST(NumbersTest, Prefixes) {
+  double d;
+  EXPECT_FALSE(absl::SimpleAtod("++1", &d));
+  EXPECT_FALSE(absl::SimpleAtod("+-1", &d));
+  EXPECT_FALSE(absl::SimpleAtod("-+1", &d));
+  EXPECT_FALSE(absl::SimpleAtod("--1", &d));
+  EXPECT_TRUE(absl::SimpleAtod("-1", &d));
+  EXPECT_EQ(d, -1.);
+  EXPECT_TRUE(absl::SimpleAtod("+1", &d));
+  EXPECT_EQ(d, +1.);
+
+  float f;
+  EXPECT_FALSE(absl::SimpleAtof("++1", &f));
+  EXPECT_FALSE(absl::SimpleAtof("+-1", &f));
+  EXPECT_FALSE(absl::SimpleAtof("-+1", &f));
+  EXPECT_FALSE(absl::SimpleAtof("--1", &f));
+  EXPECT_TRUE(absl::SimpleAtof("-1", &f));
+  EXPECT_EQ(f, -1.f);
+  EXPECT_TRUE(absl::SimpleAtof("+1", &f));
+  EXPECT_EQ(f, +1.f);
 }
 
 TEST(NumbersTest, Atoenum) {
@@ -419,6 +670,148 @@
   VerifySimpleAtoiGood<E_biguint>(E_biguint_max32, E_biguint_max32);
 }
 
+template <typename int_type, typename in_val_type>
+void VerifySimpleHexAtoiGood(in_val_type in_value, int_type exp_value) {
+  std::string s;
+  // uint128 can be streamed but not StrCat'd
+  absl::strings_internal::OStringStream strm(&s);
+  if (in_value >= 0) {
+    strm << std::hex << in_value;
+  } else {
+    // Inefficient for small integers, but works with all integral types.
+    strm << "-" << std::hex << -absl::uint128(in_value);
+  }
+  int_type x = static_cast<int_type>(~exp_value);
+  EXPECT_TRUE(SimpleHexAtoi(s, &x))
+      << "in_value=" << std::hex << in_value << " s=" << s << " x=" << x;
+  EXPECT_EQ(exp_value, x);
+  x = static_cast<int_type>(~exp_value);
+  EXPECT_TRUE(SimpleHexAtoi(
+      s.c_str(), &x));  // NOLINT: readability-redundant-string-conversions
+  EXPECT_EQ(exp_value, x);
+}
+
+template <typename int_type, typename in_val_type>
+void VerifySimpleHexAtoiBad(in_val_type in_value) {
+  std::string s;
+  // uint128 can be streamed but not StrCat'd
+  absl::strings_internal::OStringStream strm(&s);
+  if (in_value >= 0) {
+    strm << std::hex << in_value;
+  } else {
+    // Inefficient for small integers, but works with all integral types.
+    strm << "-" << std::hex << -absl::uint128(in_value);
+  }
+  int_type x;
+  EXPECT_FALSE(SimpleHexAtoi(s, &x));
+  EXPECT_FALSE(SimpleHexAtoi(
+      s.c_str(), &x));  // NOLINT: readability-redundant-string-conversions
+}
+
+TEST(NumbersTest, HexAtoi) {
+  // SimpleHexAtoi(absl::string_view, int32_t)
+  VerifySimpleHexAtoiGood<int32_t>(0, 0);
+  VerifySimpleHexAtoiGood<int32_t>(0x42, 0x42);
+  VerifySimpleHexAtoiGood<int32_t>(-0x42, -0x42);
+
+  VerifySimpleHexAtoiGood<int32_t>(std::numeric_limits<int32_t>::min(),
+                                   std::numeric_limits<int32_t>::min());
+  VerifySimpleHexAtoiGood<int32_t>(std::numeric_limits<int32_t>::max(),
+                                   std::numeric_limits<int32_t>::max());
+
+  // SimpleHexAtoi(absl::string_view, uint32_t)
+  VerifySimpleHexAtoiGood<uint32_t>(0, 0);
+  VerifySimpleHexAtoiGood<uint32_t>(0x42, 0x42);
+  VerifySimpleHexAtoiBad<uint32_t>(-0x42);
+
+  VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int32_t>::min());
+  VerifySimpleHexAtoiGood<uint32_t>(std::numeric_limits<int32_t>::max(),
+                                    std::numeric_limits<int32_t>::max());
+  VerifySimpleHexAtoiGood<uint32_t>(std::numeric_limits<uint32_t>::max(),
+                                    std::numeric_limits<uint32_t>::max());
+  VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int64_t>::min());
+  VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int64_t>::max());
+  VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<uint64_t>::max());
+
+  // SimpleHexAtoi(absl::string_view, int64_t)
+  VerifySimpleHexAtoiGood<int64_t>(0, 0);
+  VerifySimpleHexAtoiGood<int64_t>(0x42, 0x42);
+  VerifySimpleHexAtoiGood<int64_t>(-0x42, -0x42);
+
+  VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int32_t>::min(),
+                                   std::numeric_limits<int32_t>::min());
+  VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int32_t>::max(),
+                                   std::numeric_limits<int32_t>::max());
+  VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<uint32_t>::max(),
+                                   std::numeric_limits<uint32_t>::max());
+  VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int64_t>::min(),
+                                   std::numeric_limits<int64_t>::min());
+  VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int64_t>::max(),
+                                   std::numeric_limits<int64_t>::max());
+  VerifySimpleHexAtoiBad<int64_t>(std::numeric_limits<uint64_t>::max());
+
+  // SimpleHexAtoi(absl::string_view, uint64_t)
+  VerifySimpleHexAtoiGood<uint64_t>(0, 0);
+  VerifySimpleHexAtoiGood<uint64_t>(0x42, 0x42);
+  VerifySimpleHexAtoiBad<uint64_t>(-0x42);
+
+  VerifySimpleHexAtoiBad<uint64_t>(std::numeric_limits<int32_t>::min());
+  VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<int32_t>::max(),
+                                    std::numeric_limits<int32_t>::max());
+  VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<uint32_t>::max(),
+                                    std::numeric_limits<uint32_t>::max());
+  VerifySimpleHexAtoiBad<uint64_t>(std::numeric_limits<int64_t>::min());
+  VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<int64_t>::max(),
+                                    std::numeric_limits<int64_t>::max());
+  VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<uint64_t>::max(),
+                                    std::numeric_limits<uint64_t>::max());
+
+  // SimpleHexAtoi(absl::string_view, absl::uint128)
+  VerifySimpleHexAtoiGood<absl::uint128>(0, 0);
+  VerifySimpleHexAtoiGood<absl::uint128>(0x42, 0x42);
+  VerifySimpleHexAtoiBad<absl::uint128>(-0x42);
+
+  VerifySimpleHexAtoiBad<absl::uint128>(std::numeric_limits<int32_t>::min());
+  VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<int32_t>::max(),
+                                         std::numeric_limits<int32_t>::max());
+  VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<uint32_t>::max(),
+                                         std::numeric_limits<uint32_t>::max());
+  VerifySimpleHexAtoiBad<absl::uint128>(std::numeric_limits<int64_t>::min());
+  VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<int64_t>::max(),
+                                         std::numeric_limits<int64_t>::max());
+  VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<uint64_t>::max(),
+                                         std::numeric_limits<uint64_t>::max());
+  VerifySimpleHexAtoiGood<absl::uint128>(
+      std::numeric_limits<absl::uint128>::max(),
+      std::numeric_limits<absl::uint128>::max());
+
+  // Some other types
+  VerifySimpleHexAtoiGood<int>(-0x42, -0x42);
+  VerifySimpleHexAtoiGood<int32_t>(-0x42, -0x42);
+  VerifySimpleHexAtoiGood<uint32_t>(0x42, 0x42);
+  VerifySimpleHexAtoiGood<unsigned int>(0x42, 0x42);
+  VerifySimpleHexAtoiGood<int64_t>(-0x42, -0x42);
+  VerifySimpleHexAtoiGood<long>(-0x42, -0x42);  // NOLINT: runtime-int
+  VerifySimpleHexAtoiGood<uint64_t>(0x42, 0x42);
+  VerifySimpleHexAtoiGood<size_t>(0x42, 0x42);
+  VerifySimpleHexAtoiGood<std::string::size_type>(0x42, 0x42);
+
+  // Number prefix
+  int32_t value;
+  EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16));
+  EXPECT_EQ(0x34234324, value);
+
+  EXPECT_TRUE(safe_strto32_base("0X34234324", &value, 16));
+  EXPECT_EQ(0x34234324, value);
+
+  // ASCII whitespace
+  EXPECT_TRUE(safe_strto32_base(" \t\n 34234324", &value, 16));
+  EXPECT_EQ(0x34234324, value);
+
+  EXPECT_TRUE(safe_strto32_base("34234324 \t\n ", &value, 16));
+  EXPECT_EQ(0x34234324, value);
+}
+
 TEST(stringtest, safe_strto32_base) {
   int32_t value;
   EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16));
@@ -725,6 +1118,51 @@
     EXPECT_FALSE(parse_func(s, &parsed_value, base));
   }
 }
+TEST(stringtest, safe_strto128_random) {
+  // Random number generators don't work for int128, and int128 can be
+  // streamed but not StrCat'd, so this test is hand-written for int128; it is
+  // otherwise the same as the generic version above.
+  // test_random_integer_parse_base<absl::int128>(
+  //     &absl::numbers_internal::safe_strto128_base);
+  using RandomEngine = std::minstd_rand0;
+  using IntType = absl::int128;
+  constexpr auto parse_func = &absl::numbers_internal::safe_strto128_base;
+
+  std::random_device rd;
+  RandomEngine rng(rd());
+  std::uniform_int_distribution<int64_t> random_int64(
+      std::numeric_limits<int64_t>::min());
+  std::uniform_int_distribution<uint64_t> random_uint64(
+      std::numeric_limits<uint64_t>::min());
+  std::uniform_int_distribution<int> random_base(2, 35);
+
+  for (size_t i = 0; i < kNumRandomTests; ++i) {
+    int64_t high = random_int64(rng);
+    uint64_t low = random_uint64(rng);
+    IntType value = absl::MakeInt128(high, low);
+
+    int base = random_base(rng);
+    std::string str_value;
+    EXPECT_TRUE(Itoa<IntType>(value, base, &str_value));
+    IntType parsed_value;
+
+    // Test successful parse
+    EXPECT_TRUE(parse_func(str_value, &parsed_value, base));
+    EXPECT_EQ(parsed_value, value);
+
+    // Test overflow
+    std::string s;
+    absl::strings_internal::OStringStream(&s)
+        << std::numeric_limits<IntType>::max() << value;
+    EXPECT_FALSE(parse_func(s, &parsed_value, base));
+
+    // Test underflow
+    s.clear();
+    absl::strings_internal::OStringStream(&s)
+        << std::numeric_limits<IntType>::min() << value;
+    EXPECT_FALSE(parse_func(s, &parsed_value, base));
+  }
+}
 
 TEST(stringtest, safe_strtou32_base) {
   for (int i = 0; strtouint32_test_cases()[i].str != nullptr; ++i) {
@@ -899,11 +1337,9 @@
     if (strcmp(sixdigitsbuf, snprintfbuf) != 0) {
       mismatches.push_back(d);
       if (mismatches.size() < 10) {
-        ABSL_RAW_LOG(ERROR, "%s",
-                     absl::StrCat("Six-digit failure with double.  ", "d=", d,
-                                  "=", d, " sixdigits=", sixdigitsbuf,
-                                  " printf(%g)=", snprintfbuf)
-                         .c_str());
+        LOG(ERROR) << "Six-digit failure with double.  d=" << d
+                   << " sixdigits=" << sixdigitsbuf
+                   << " printf(%g)=" << snprintfbuf;
       }
     }
   };
@@ -951,12 +1387,10 @@
       if (kFloatNumCases >= 1e9) {
         // The exhaustive test takes a very long time, so log progress.
         char buf[kSixDigitsToBufferSize];
-        ABSL_RAW_LOG(
-            INFO, "%s",
-            absl::StrCat("Exp ", exponent, " powten=", powten, "(", powten,
-                         ") (",
-                         std::string(buf, SixDigitsToBuffer(powten, buf)), ")")
-                .c_str());
+        LOG(INFO) << "Exp " << exponent << " powten=" << powten << "(" << powten
+                  << ") ("
+                  << absl::string_view(buf, SixDigitsToBuffer(powten, buf))
+                  << ")";
       }
       for (int digits : digit_testcases) {
         if (exponent == 308 && digits >= 179769) break;  // don't overflow!
@@ -981,20 +1415,17 @@
       double before = nextafter(d, 0.0);
       double after = nextafter(d, 1.7976931348623157e308);
       char b1[32], b2[kSixDigitsToBufferSize];
-      ABSL_RAW_LOG(
-          ERROR, "%s",
-          absl::StrCat(
-              "Mismatch #", i, "  d=", d, " (", ToNineDigits(d), ")",
-              " sixdigits='", sixdigitsbuf, "'", " snprintf='", snprintfbuf,
-              "'", " Before.=", PerfectDtoa(before), " ",
-              (SixDigitsToBuffer(before, b2), b2),
-              " vs snprintf=", (snprintf(b1, sizeof(b1), "%g", before), b1),
-              " Perfect=", PerfectDtoa(d), " ", (SixDigitsToBuffer(d, b2), b2),
-              " vs snprintf=", (snprintf(b1, sizeof(b1), "%g", d), b1),
-              " After.=.", PerfectDtoa(after), " ",
-              (SixDigitsToBuffer(after, b2), b2),
-              " vs snprintf=", (snprintf(b1, sizeof(b1), "%g", after), b1))
-              .c_str());
+      LOG(ERROR) << "Mismatch #" << i << "  d=" << d << " (" << ToNineDigits(d)
+                 << ") sixdigits='" << sixdigitsbuf << "' snprintf='"
+                 << snprintfbuf << "' Before.=" << PerfectDtoa(before) << " "
+                 << (SixDigitsToBuffer(before, b2), b2) << " vs snprintf="
+                 << (snprintf(b1, sizeof(b1), "%g", before), b1)
+                 << " Perfect=" << PerfectDtoa(d) << " "
+                 << (SixDigitsToBuffer(d, b2), b2)
+                 << " vs snprintf=" << (snprintf(b1, sizeof(b1), "%g", d), b1)
+                 << " After.=." << PerfectDtoa(after) << " "
+                 << (SixDigitsToBuffer(after, b2), b2) << " vs snprintf="
+                 << (snprintf(b1, sizeof(b1), "%g", after), b1);
     }
   }
 }
diff --git a/abseil-cpp/absl/strings/str_cat.cc b/abseil-cpp/absl/strings/str_cat.cc
index dd5d25b..2e49c31 100644
--- a/abseil-cpp/absl/strings/str_cat.cc
+++ b/abseil-cpp/absl/strings/str_cat.cc
@@ -17,65 +17,19 @@
 #include <assert.h>
 
 #include <algorithm>
+#include <cstddef>
 #include <cstdint>
 #include <cstring>
+#include <string>
 
 #include "absl/strings/ascii.h"
 #include "absl/strings/internal/resize_uninitialized.h"
 #include "absl/strings/numbers.h"
+#include "absl/strings/string_view.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-AlphaNum::AlphaNum(Hex hex) {
-  static_assert(numbers_internal::kFastToBufferSize >= 32,
-                "This function only works when output buffer >= 32 bytes long");
-  char* const end = &digits_[numbers_internal::kFastToBufferSize];
-  auto real_width =
-      absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
-  if (real_width >= hex.width) {
-    piece_ = absl::string_view(end - real_width, real_width);
-  } else {
-    // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
-    // max pad width can be up to 20.
-    std::memset(end - 32, hex.fill, 16);
-    // Patch up everything else up to the real_width.
-    std::memset(end - real_width - 16, hex.fill, 16);
-    piece_ = absl::string_view(end - hex.width, hex.width);
-  }
-}
-
-AlphaNum::AlphaNum(Dec dec) {
-  assert(dec.width <= numbers_internal::kFastToBufferSize);
-  char* const end = &digits_[numbers_internal::kFastToBufferSize];
-  char* const minfill = end - dec.width;
-  char* writer = end;
-  uint64_t value = dec.value;
-  bool neg = dec.neg;
-  while (value > 9) {
-    *--writer = '0' + (value % 10);
-    value /= 10;
-  }
-  *--writer = '0' + value;
-  if (neg) *--writer = '-';
-
-  ptrdiff_t fillers = writer - minfill;
-  if (fillers > 0) {
-    // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
-    // But...: if the fill character is '0', then it's <+/-><fill><digits>
-    bool add_sign_again = false;
-    if (neg && dec.fill == '0') {  // If filling with '0',
-      ++writer;                    // ignore the sign we just added
-      add_sign_again = true;       // and re-add the sign later.
-    }
-    writer -= fillers;
-    std::fill_n(writer, fillers, dec.fill);
-    if (add_sign_again) *--writer = '-';
-  }
-
-  piece_ = absl::string_view(writer, end - writer);
-}
-
 // ----------------------------------------------------------------------
 // StrCat()
 //    This merges the given strings or integers, with no delimiter. This
@@ -141,12 +95,12 @@
 std::string CatPieces(std::initializer_list<absl::string_view> pieces) {
   std::string result;
   size_t total_size = 0;
-  for (const absl::string_view& piece : pieces) total_size += piece.size();
+  for (absl::string_view piece : pieces) total_size += piece.size();
   strings_internal::STLStringResizeUninitialized(&result, total_size);
 
   char* const begin = &result[0];
   char* out = begin;
-  for (const absl::string_view& piece : pieces) {
+  for (absl::string_view piece : pieces) {
     const size_t this_size = piece.size();
     if (this_size != 0) {
       memcpy(out, piece.data(), this_size);
@@ -170,15 +124,15 @@
                   std::initializer_list<absl::string_view> pieces) {
   size_t old_size = dest->size();
   size_t total_size = old_size;
-  for (const absl::string_view& piece : pieces) {
+  for (absl::string_view piece : pieces) {
     ASSERT_NO_OVERLAP(*dest, piece);
     total_size += piece.size();
   }
-  strings_internal::STLStringResizeUninitialized(dest, total_size);
+  strings_internal::STLStringResizeUninitializedAmortized(dest, total_size);
 
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
-  for (const absl::string_view& piece : pieces) {
+  for (absl::string_view piece : pieces) {
     const size_t this_size = piece.size();
     if (this_size != 0) {
       memcpy(out, piece.data(), this_size);
@@ -192,14 +146,20 @@
 
 void StrAppend(std::string* dest, const AlphaNum& a) {
   ASSERT_NO_OVERLAP(*dest, a);
-  dest->append(a.data(), a.size());
+  std::string::size_type old_size = dest->size();
+  strings_internal::STLStringResizeUninitializedAmortized(dest,
+                                                          old_size + a.size());
+  char* const begin = &(*dest)[0];
+  char* out = begin + old_size;
+  out = Append(out, a);
+  assert(out == begin + dest->size());
 }
 
 void StrAppend(std::string* dest, const AlphaNum& a, const AlphaNum& b) {
   ASSERT_NO_OVERLAP(*dest, a);
   ASSERT_NO_OVERLAP(*dest, b);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringResizeUninitialized(
+  strings_internal::STLStringResizeUninitializedAmortized(
       dest, old_size + a.size() + b.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
@@ -214,7 +174,7 @@
   ASSERT_NO_OVERLAP(*dest, b);
   ASSERT_NO_OVERLAP(*dest, c);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringResizeUninitialized(
+  strings_internal::STLStringResizeUninitializedAmortized(
       dest, old_size + a.size() + b.size() + c.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
@@ -231,7 +191,7 @@
   ASSERT_NO_OVERLAP(*dest, c);
   ASSERT_NO_OVERLAP(*dest, d);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringResizeUninitialized(
+  strings_internal::STLStringResizeUninitializedAmortized(
       dest, old_size + a.size() + b.size() + c.size() + d.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
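The StrAppend changes above switch every growth path from STLStringResizeUninitialized to STLStringResizeUninitializedAmortized. The real helper lives in strings/internal/resize_uninitialized.h and is not shown in this diff; a rough sketch of the amortized-growth idea behind the name (helper name and growth factor are illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <string>

    // Grow `s` to `new_size`, reserving geometrically first so that a run of
    // appends costs amortized O(total bytes) instead of a reallocation per call.
    void ResizeAmortizedSketch(std::string* s, size_t new_size) {
      if (new_size > s->capacity()) {
        s->reserve(std::max(new_size, 2 * s->capacity()));
      }
      s->resize(new_size);  // the real helper also avoids zero-filling new bytes
    }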
diff --git a/abseil-cpp/absl/strings/str_cat.h b/abseil-cpp/absl/strings/str_cat.h
index a8a85c7..d5f71ff 100644
--- a/abseil-cpp/absl/strings/str_cat.h
+++ b/abseil-cpp/absl/strings/str_cat.h
@@ -48,18 +48,58 @@
 // `StrCat()` or `StrAppend()`. You may specify a minimum hex field width using
 // a `PadSpec` enum.
 //
+// User-defined types can be formatted with the `AbslStringify()` customization
+// point. The API relies on detecting an overload in the user-defined type's
+// namespace of a free (non-member) `AbslStringify()` function as a definition
+// (typically declared as a friend and implemented in-line) with the following
+// signature:
+//
+// class MyClass { ... };
+//
+// template <typename Sink>
+// void AbslStringify(Sink& sink, const MyClass& value);
+//
+// An `AbslStringify()` overload for a type should only be declared in the same
+// file and namespace as said type.
+//
+// Note that `AbslStringify()` also supports use with `absl::StrFormat()` and
+// `absl::Substitute()`.
+//
+// Example:
+//
+// struct Point {
+//   // To add formatting support to `Point`, we simply need to add a free
+//   // (non-member) function `AbslStringify()`. This method specifies how
+//   // Point should be printed when absl::StrCat() is called on it. You can add
+//   // such a free function using a friend declaration within the body of the
+//   // class. The sink parameter is a templated type to avoid requiring
+//   // dependencies.
+//   template <typename Sink>
+//   friend void AbslStringify(Sink& sink, const Point& p) {
+//     absl::Format(&sink, "(%v, %v)", p.x, p.y);
+//   }
+//
+//   int x;
+//   int y;
+// };
 // -----------------------------------------------------------------------------
 
 #ifndef ABSL_STRINGS_STR_CAT_H_
 #define ABSL_STRINGS_STR_CAT_H_
 
+#include <algorithm>
 #include <array>
 #include <cstdint>
+#include <cstring>
 #include <string>
 #include <type_traits>
+#include <utility>
 #include <vector>
 
+#include "absl/base/attributes.h"
 #include "absl/base/port.h"
+#include "absl/strings/internal/has_absl_stringify.h"
+#include "absl/strings/internal/stringify_sink.h"
 #include "absl/strings/numbers.h"
 #include "absl/strings/string_view.h"
 
@@ -164,6 +204,27 @@
   explicit Hex(Pointee* v, PadSpec spec = absl::kNoPad)
       : Hex(spec, reinterpret_cast<uintptr_t>(v)) {}
 
+  template <typename S>
+  friend void AbslStringify(S& sink, Hex hex) {
+    static_assert(
+        numbers_internal::kFastToBufferSize >= 32,
+        "This function only works when output buffer >= 32 bytes long");
+    char buffer[numbers_internal::kFastToBufferSize];
+    char* const end = &buffer[numbers_internal::kFastToBufferSize];
+    auto real_width =
+        absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
+    if (real_width >= hex.width) {
+      sink.Append(absl::string_view(end - real_width, real_width));
+    } else {
+      // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
+      // max pad width can be up to 20.
+      std::memset(end - 32, hex.fill, 16);
+      // Patch up everything else up to the real_width.
+      std::memset(end - real_width - 16, hex.fill, 16);
+      sink.Append(absl::string_view(end - hex.width, hex.width));
+    }
+  }
+
  private:
   Hex(PadSpec spec, uint64_t v)
       : value(v),
@@ -198,6 +259,38 @@
                                              : spec - absl::kZeroPad2 + 2),
         fill(spec >= absl::kSpacePad2 ? ' ' : '0'),
         neg(v < 0) {}
+
+  template <typename S>
+  friend void AbslStringify(S& sink, Dec dec) {
+    assert(dec.width <= numbers_internal::kFastToBufferSize);
+    char buffer[numbers_internal::kFastToBufferSize];
+    char* const end = &buffer[numbers_internal::kFastToBufferSize];
+    char* const minfill = end - dec.width;
+    char* writer = end;
+    uint64_t val = dec.value;
+    while (val > 9) {
+      *--writer = '0' + (val % 10);
+      val /= 10;
+    }
+    *--writer = '0' + static_cast<char>(val);
+    if (dec.neg) *--writer = '-';
+
+    ptrdiff_t fillers = writer - minfill;
+    if (fillers > 0) {
+      // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
+      // But...: if the fill character is '0', then it's <+/-><fill><digits>
+      bool add_sign_again = false;
+      if (dec.neg && dec.fill == '0') {  // If filling with '0',
+        ++writer;                    // ignore the sign we just added
+        add_sign_again = true;       // and re-add the sign later.
+      }
+      writer -= fillers;
+      std::fill_n(writer, fillers, dec.fill);
+      if (add_sign_again) *--writer = '-';
+    }
+
+    sink.Append(absl::string_view(writer, static_cast<size_t>(end - writer)));
+  }
 };
 
 // -----------------------------------------------------------------------------
@@ -205,8 +298,10 @@
 // -----------------------------------------------------------------------------
 //
 // The `AlphaNum` class acts as the main parameter type for `StrCat()` and
-// `StrAppend()`, providing efficient conversion of numeric, boolean, and
-// hexadecimal values (through the `Hex` type) into strings.
+// `StrAppend()`, providing efficient conversion of numeric, boolean, decimal,
+// and hexadecimal values (through the `Dec` and `Hex` types) into strings.
+// `AlphaNum` should only be used as a function parameter. Do not instantiate
+// `AlphaNum` directly as a stack variable.
 
 class AlphaNum {
  public:
@@ -214,43 +309,59 @@
   // A bool ctor would also convert incoming pointers (bletch).
 
   AlphaNum(int x)  // NOLINT(runtime/explicit)
-      : piece_(digits_,
-               numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+      : piece_(digits_, static_cast<size_t>(
+                            numbers_internal::FastIntToBuffer(x, digits_) -
+                            &digits_[0])) {}
   AlphaNum(unsigned int x)  // NOLINT(runtime/explicit)
-      : piece_(digits_,
-               numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+      : piece_(digits_, static_cast<size_t>(
+                            numbers_internal::FastIntToBuffer(x, digits_) -
+                            &digits_[0])) {}
   AlphaNum(long x)  // NOLINT(*)
-      : piece_(digits_,
-               numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+      : piece_(digits_, static_cast<size_t>(
+                            numbers_internal::FastIntToBuffer(x, digits_) -
+                            &digits_[0])) {}
   AlphaNum(unsigned long x)  // NOLINT(*)
-      : piece_(digits_,
-               numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+      : piece_(digits_, static_cast<size_t>(
+                            numbers_internal::FastIntToBuffer(x, digits_) -
+                            &digits_[0])) {}
   AlphaNum(long long x)  // NOLINT(*)
-      : piece_(digits_,
-               numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+      : piece_(digits_, static_cast<size_t>(
+                            numbers_internal::FastIntToBuffer(x, digits_) -
+                            &digits_[0])) {}
   AlphaNum(unsigned long long x)  // NOLINT(*)
-      : piece_(digits_,
-               numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+      : piece_(digits_, static_cast<size_t>(
+                            numbers_internal::FastIntToBuffer(x, digits_) -
+                            &digits_[0])) {}
 
   AlphaNum(float f)  // NOLINT(runtime/explicit)
       : piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
   AlphaNum(double f)  // NOLINT(runtime/explicit)
       : piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
 
-  AlphaNum(Hex hex);  // NOLINT(runtime/explicit)
-  AlphaNum(Dec dec);  // NOLINT(runtime/explicit)
-
   template <size_t size>
   AlphaNum(  // NOLINT(runtime/explicit)
-      const strings_internal::AlphaNumBuffer<size>& buf)
+      const strings_internal::AlphaNumBuffer<size>& buf
+          ABSL_ATTRIBUTE_LIFETIME_BOUND)
       : piece_(&buf.data[0], buf.size) {}
 
-  AlphaNum(const char* c_str) : piece_(c_str) {}  // NOLINT(runtime/explicit)
-  AlphaNum(absl::string_view pc) : piece_(pc) {}  // NOLINT(runtime/explicit)
+  AlphaNum(const char* c_str  // NOLINT(runtime/explicit)
+               ABSL_ATTRIBUTE_LIFETIME_BOUND)
+      : piece_(NullSafeStringView(c_str)) {}
+  AlphaNum(absl::string_view pc  // NOLINT(runtime/explicit)
+               ABSL_ATTRIBUTE_LIFETIME_BOUND)
+      : piece_(pc) {}
+
+  template <typename T, typename = typename std::enable_if<
+                            strings_internal::HasAbslStringify<T>::value>::type>
+  AlphaNum(  // NOLINT(runtime/explicit)
+      const T& v ABSL_ATTRIBUTE_LIFETIME_BOUND,
+      strings_internal::StringifySink&& sink ABSL_ATTRIBUTE_LIFETIME_BOUND = {})
+      : piece_(strings_internal::ExtractStringification(sink, v)) {}
 
   template <typename Allocator>
   AlphaNum(  // NOLINT(runtime/explicit)
-      const std::basic_string<char, std::char_traits<char>, Allocator>& str)
+      const std::basic_string<char, std::char_traits<char>, Allocator>& str
+          ABSL_ATTRIBUTE_LIFETIME_BOUND)
       : piece_(str) {}
 
   // Use string literals ":" instead of character literals ':'.
@@ -263,13 +374,24 @@
   const char* data() const { return piece_.data(); }
   absl::string_view Piece() const { return piece_; }
 
-  // Normal enums are already handled by the integer formatters.
-  // This overload matches only scoped enums.
+  // Match unscoped enums.  Use integral promotion so that a `char`-backed
+  // enum becomes a wider integral type AlphaNum will accept.
   template <typename T,
             typename = typename std::enable_if<
-                std::is_enum<T>{} && !std::is_convertible<T, int>{}>::type>
+                std::is_enum<T>{} && std::is_convertible<T, int>{} &&
+                !strings_internal::HasAbslStringify<T>::value>::type>
   AlphaNum(T e)  // NOLINT(runtime/explicit)
-      : AlphaNum(static_cast<typename std::underlying_type<T>::type>(e)) {}
+      : AlphaNum(+e) {}
+
+  // This overload matches scoped enums.  We must explicitly cast to the
+  // underlying type, but use integral promotion for the same reason as above.
+  template <typename T,
+            typename std::enable_if<
+                std::is_enum<T>{} && !std::is_convertible<T, int>{} &&
+                    !strings_internal::HasAbslStringify<T>::value,
+                char*>::type = nullptr>
+  AlphaNum(T e)  // NOLINT(runtime/explicit)
+      : AlphaNum(+static_cast<typename std::underlying_type<T>::type>(e)) {}
 
   // vector<bool>::reference and const_reference require special help to
   // convert to `AlphaNum` because it requires two user defined conversions.
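Taken together, the str_cat.h changes above move Hex/Dec formatting into AbslStringify() friends and teach AlphaNum to accept any type with an AbslStringify() overload. A standalone usage sketch of the documented extension point (Point mirrors the example in the header comment; main() and the printed values are illustrative):

    #include <iostream>

    #include "absl/strings/str_cat.h"
    #include "absl/strings/str_format.h"

    struct Point {
      // Free (non-member) AbslStringify() declared as a friend, as described in
      // the header comment.
      template <typename Sink>
      friend void AbslStringify(Sink& sink, const Point& p) {
        absl::Format(&sink, "(%v, %v)", p.x, p.y);
      }
      int x;
      int y;
    };

    int main() {
      Point p{3, 4};
      std::cout << absl::StrCat("p = ", p) << "\n";       // p = (3, 4)
      std::cout << absl::StrFormat("p = %v", p) << "\n";  // p = (3, 4)
    }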
diff --git a/abseil-cpp/absl/strings/str_cat_test.cc b/abseil-cpp/absl/strings/str_cat_test.cc
index f3770dc..2d74245 100644
--- a/abseil-cpp/absl/strings/str_cat_test.cc
+++ b/abseil-cpp/absl/strings/str_cat_test.cc
@@ -21,6 +21,7 @@
 #include <vector>
 
 #include "gtest/gtest.h"
+#include "absl/strings/str_format.h"
 #include "absl/strings/substitute.h"
 
 #ifdef __ANDROID__
@@ -210,6 +211,11 @@
   EXPECT_EQ(result, "");
 }
 
+TEST(StrCat, NullConstCharPtr) {
+  const char* null = nullptr;
+  EXPECT_EQ(absl::StrCat("mon", null, "key"), "monkey");
+}
+
 // A minimal allocator that uses malloc().
 template <typename T>
 struct Mallocator {
@@ -437,7 +443,7 @@
   EXPECT_EQ(result, "12345");
 }
 
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
 TEST(StrAppend, Death) {
   std::string s = "self";
   // on linux it's "assertion", on mac it's "Assertion",
@@ -607,4 +613,53 @@
   TestFastPrints();
 }
 
+struct PointStringify {
+  template <typename FormatSink>
+  friend void AbslStringify(FormatSink& sink, const PointStringify& p) {
+    sink.Append("(");
+    sink.Append(absl::StrCat(p.x));
+    sink.Append(", ");
+    sink.Append(absl::StrCat(p.y));
+    sink.Append(")");
+  }
+
+  double x = 10.0;
+  double y = 20.0;
+};
+
+TEST(StrCat, AbslStringifyExample) {
+  PointStringify p;
+  EXPECT_EQ(absl::StrCat(p), "(10, 20)");
+  EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z");
+}
+
+struct PointStringifyUsingFormat {
+  template <typename FormatSink>
+  friend void AbslStringify(FormatSink& sink,
+                            const PointStringifyUsingFormat& p) {
+    absl::Format(&sink, "(%g, %g)", p.x, p.y);
+  }
+
+  double x = 10.0;
+  double y = 20.0;
+};
+
+TEST(StrCat, AbslStringifyExampleUsingFormat) {
+  PointStringifyUsingFormat p;
+  EXPECT_EQ(absl::StrCat(p), "(10, 20)");
+  EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z");
+}
+
+enum class EnumWithStringify { Many = 0, Choices = 1 };
+
+template <typename Sink>
+void AbslStringify(Sink& sink, EnumWithStringify e) {
+  absl::Format(&sink, "%s", e == EnumWithStringify::Many ? "Many" : "Choices");
+}
+
+TEST(StrCat, AbslStringifyWithEnum) {
+  const auto e = EnumWithStringify::Choices;
+  EXPECT_EQ(absl::StrCat(e), "Choices");
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/strings/str_format.h b/abseil-cpp/absl/strings/str_format.h
index 0146510..023e435 100644
--- a/abseil-cpp/absl/strings/str_format.h
+++ b/abseil-cpp/absl/strings/str_format.h
@@ -36,10 +36,12 @@
 //   * `absl::StreamFormat()` to more efficiently write a format string to a
 //     stream, such as`std::cout`.
 //   * `absl::PrintF()`, `absl::FPrintF()` and `absl::SNPrintF()` as
-//     replacements for `std::printf()`, `std::fprintf()` and `std::snprintf()`.
+//     drop-in replacements for `std::printf()`, `std::fprintf()` and
+//     `std::snprintf()`.
 //
-//     Note: a version of `std::sprintf()` is not supported as it is
-//     generally unsafe due to buffer overflows.
+//     Note: An `absl::SPrintF()` drop-in replacement is not supported as it
+//     is generally unsafe due to buffer overflows. Use `absl::StrFormat`,
+//     which returns the string as output instead of expecting a pre-allocated
+//     buffer.
 //
 // Additionally, you can provide a format string (and its associated arguments)
 // using one of the following abstractions:
@@ -191,9 +193,9 @@
 //   absl::StrFormat(formatString, "TheVillage", 6);
 //
 // A format string generally follows the POSIX syntax as used within the POSIX
-// `printf` specification.
+// `printf` specification. (Exceptions are noted below.)
 //
-// (See http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html.)
+// (See http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html)
 //
 // In specific, the `FormatSpec` supports the following type specifiers:
 //   * `c` for characters
@@ -211,6 +213,10 @@
 //   * `n` for the special case of writing out the number of characters
 //     written to this point. The resulting value must be captured within an
 //     `absl::FormatCountCapture` type.
+//   * `v` for values using the default format for a deduced type. These deduced
+//     types include many of the primitive types denoted here as well as
+//     user-defined types containing the proper extensions. (See below for more
+//     information.)
 //
 // Implementation-defined behavior:
 //   * A null pointer provided to "%s" or "%p" is output as "(nil)".
@@ -239,11 +245,21 @@
 //         "%s%d%n", "hello", 123, absl::FormatCountCapture(&n));
 //     EXPECT_EQ(8, n);
 //
+// NOTE: the `v` specifier (for "value") is a type specifier not present in the
+// POSIX specification. %v will format values according to their deduced type.
+// `v` uses `d` for signed integer values, `u` for unsigned integer values, `g`
+// for floating point values, and formats boolean values as "true"/"false"
+// (instead of 1 or 0 for booleans formatted using d). `const char*` is not
+// supported; please use `std:string` and `string_view`. `char` is also not
+// supported due to ambiguity of the type. This specifier does not support
+// modifiers.
+//
 // The `FormatSpec` intrinsically supports all of these fundamental C++ types:
 //
 // *   Characters: `char`, `signed char`, `unsigned char`
 // *   Integers: `int`, `short`, `unsigned short`, `unsigned`, `long`,
 //         `unsigned long`, `long long`, `unsigned long long`
+// *   Enums: printed as their underlying integral value
 // *   Floating-point: `float`, `double`, `long double`
 //
 // However, in the `str_format` library, a format conversion specifies a broader
@@ -536,8 +552,7 @@
 // The arguments are provided in an `absl::Span<const absl::FormatArg>`.
 // Each `absl::FormatArg` object binds to a single argument and keeps a
 // reference to it. The values used to create the `FormatArg` objects must
-// outlive this function call. (See `str_format_arg.h` for information on
-// the `FormatArg` class.)_
+// outlive this function call.
 //
 // Example:
 //
@@ -571,6 +586,41 @@
 // StrFormat Extensions
 //------------------------------------------------------------------------------
 //
+// AbslStringify()
+//
+// A simpler customization API for formatting user-defined types using
+// absl::StrFormat(). The API relies on detecting an overload in the
+// user-defined type's namespace of a free (non-member) `AbslStringify()`
+// function as a friend definition with the following signature:
+//
+// template <typename Sink>
+// void AbslStringify(Sink& sink, const X& value);
+//
+// An `AbslStringify()` overload for a type should only be declared in the same
+// file and namespace as said type.
+//
+// Note that unlike with AbslFormatConvert(), AbslStringify() does not allow
+// customization of allowed conversion characters. AbslStringify() uses `%v` as
+// the underlying conversion specifier. Additionally, AbslStringify() supports
+// use with absl::StrCat while AbslFormatConvert() does not.
+//
+// Example:
+//
+// struct Point {
+//   // To add formatting support to `Point`, we simply need to add a free
+//   // (non-member) function `AbslStringify()`. This method prints in the
+//   // requested format using the underlying `%v` specifier. You can add such
+//   // a free function using a friend declaration within the body of the class.
+//   // The sink parameter is a templated type to avoid requiring dependencies.
+//   template <typename Sink>
+//   friend void AbslStringify(Sink& sink, const Point& p) {
+//     absl::Format(&sink, "(%v, %v)", p.x, p.y);
+//   }
+//
+//   int x;
+//   int y;
+// };
+//
 // AbslFormatConvert()
 //
 // The StrFormat library provides a customization API for formatting
@@ -617,9 +667,9 @@
 //   AbslFormatConvert(const Point& p, const absl::FormatConversionSpec& spec,
 //                     absl::FormatSink* s) {
 //     if (spec.conversion_char() == absl::FormatConversionChar::s) {
-//       s->Append(absl::StrCat("x=", p.x, " y=", p.y));
+//       absl::Format(s, "x=%vy=%v", p.x, p.y);
 //     } else {
-//       s->Append(absl::StrCat(p.x, ",", p.y));
+//       absl::Format(s, "%v,%v", p.x, p.y);
 //     }
 //     return {true};
 //   }
@@ -638,7 +688,7 @@
   c, s,                    // text
   d, i, o, u, x, X,        // int
   f, F, e, E, g, G, a, A,  // float
-  n, p                     // misc
+  n, p, v                  // misc
 };
 // clang-format on
 
@@ -758,6 +808,7 @@
   // misc
   n = str_format_internal::FormatConversionCharToConvInt('n'),
   p = str_format_internal::FormatConversionCharToConvInt('p'),
+  v = str_format_internal::FormatConversionCharToConvInt('v'),
 
   // Used for width/precision '*' specification.
   kStar = static_cast<uint64_t>(
@@ -772,23 +823,36 @@
 
 // FormatSink
 //
-// An abstraction to which conversions write their string data.
+// A format sink is a generic abstraction to which conversions may write their
+// formatted string data. `absl::FormatConvert()` uses this sink to write its
+// formatted string.
 //
 class FormatSink {
  public:
-  // Appends `count` copies of `ch`.
+  // FormatSink::Append()
+  //
+  // Appends `count` copies of `ch` to the format sink.
   void Append(size_t count, char ch) { sink_->Append(count, ch); }
 
+  // Overload of FormatSink::Append() for appending the characters of a string
+  // view to a format sink.
   void Append(string_view v) { sink_->Append(v); }
 
-  // Appends the first `precision` bytes of `v`. If this is less than
-  // `width`, spaces will be appended first (if `left` is false), or
+  // FormatSink::PutPaddedString()
+  //
+  // Appends `precision` number of bytes of `v` to the format sink. If this is
+  // less than `width`, spaces will be appended first (if `left` is false), or
   // after (if `left` is true) to ensure the total amount appended is
   // at least `width`.
   bool PutPaddedString(string_view v, int width, int precision, bool left) {
     return sink_->PutPaddedString(v, width, precision, left);
   }
 
+  // Support `absl::Format(&sink, format, args...)`.
+  friend void AbslFormatFlush(FormatSink* sink, absl::string_view v) {
+    sink->Append(v);
+  }
+
  private:
   friend str_format_internal::FormatSinkImpl;
   explicit FormatSink(str_format_internal::FormatSinkImpl* s) : sink_(s) {}
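The %v documentation added above boils down to a handful of concrete behaviors, which the new tests further below spell out. A small assert-based sketch of those expectations (illustrative only, not part of the diff):

    #include <cassert>
    #include <string>

    #include "absl/strings/str_format.h"

    int main() {
      assert(absl::StrFormat("%v", -10) == "-10");     // signed integers use %d
      assert(absl::StrFormat("%v", 1e10) == "1e+10");  // floating point uses %g
      assert(absl::StrFormat("%v", true) == "true");   // bools print true/false
      assert(absl::StrFormat("%v", std::string("C++")) == "C++");
      // `const char*` and `char` are intentionally unsupported with %v, and %v
      // rejects width/precision modifiers.
    }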
diff --git a/abseil-cpp/absl/strings/str_format_test.cc b/abseil-cpp/absl/strings/str_format_test.cc
index d9fb25a..20fd028 100644
--- a/abseil-cpp/absl/strings/str_format_test.cc
+++ b/abseil-cpp/absl/strings/str_format_test.cc
@@ -1,3 +1,16 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 #include "absl/strings/str_format.h"
 
@@ -29,6 +42,18 @@
   EXPECT_TRUE(Format(&sink, pc, 123));
   EXPECT_EQ("A format 123", sink);
 }
+
+TEST_F(FormatEntryPointTest, FormatWithV) {
+  std::string sink;
+  EXPECT_TRUE(Format(&sink, "A format %v", 123));
+  EXPECT_EQ("A format 123", sink);
+  sink.clear();
+
+  ParsedFormat<'v'> pc("A format %v");
+  EXPECT_TRUE(Format(&sink, pc, 123));
+  EXPECT_EQ("A format 123", sink);
+}
+
 TEST_F(FormatEntryPointTest, UntypedFormat) {
   constexpr const char* formats[] = {
     "",
@@ -71,6 +96,14 @@
   EXPECT_EQ("=123=", StrFormat(view, 123));
 }
 
+TEST_F(FormatEntryPointTest, StringFormatV) {
+  std::string hello = "hello";
+  EXPECT_EQ("hello", StrFormat("%v", hello));
+  EXPECT_EQ("123", StrFormat("%v", 123));
+  constexpr absl::string_view view("=%v=", 4);
+  EXPECT_EQ("=123=", StrFormat(view, 123));
+}
+
 TEST_F(FormatEntryPointTest, AppendFormat) {
   std::string s;
   std::string& r = StrAppendFormat(&s, "%d", 123);
@@ -78,6 +111,13 @@
   EXPECT_EQ("123", r);
 }
 
+TEST_F(FormatEntryPointTest, AppendFormatWithV) {
+  std::string s;
+  std::string& r = StrAppendFormat(&s, "%v", 123);
+  EXPECT_EQ(&s, &r);  // should be same object
+  EXPECT_EQ("123", r);
+}
+
 TEST_F(FormatEntryPointTest, AppendFormatFail) {
   std::string s = "orig";
 
@@ -90,15 +130,33 @@
                 {&arg, 1}));
 }
 
+TEST_F(FormatEntryPointTest, AppendFormatFailWithV) {
+  std::string s = "orig";
+
+  UntypedFormatSpec format(" more %v");
+  FormatArgImpl arg("not an int");
+
+  EXPECT_EQ("orig",
+            str_format_internal::AppendPack(
+                &s, str_format_internal::UntypedFormatSpecImpl::Extract(format),
+                {&arg, 1}));
+}
 
 TEST_F(FormatEntryPointTest, ManyArgs) {
-  EXPECT_EQ("24", StrFormat("%24$d", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
-                            14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24));
-  EXPECT_EQ("60", StrFormat("%60$d", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
-                            14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
-                            27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
-                            40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
-                            53, 54, 55, 56, 57, 58, 59, 60));
+  EXPECT_EQ(
+      "60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 "
+      "36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 "
+      "12 11 10 9 8 7 6 5 4 3 2 1",
+      StrFormat("%60$d %59$d %58$d %57$d %56$d %55$d %54$d %53$d %52$d %51$d "
+                "%50$d %49$d %48$d %47$d %46$d %45$d %44$d %43$d %42$d %41$d "
+                "%40$d %39$d %38$d %37$d %36$d %35$d %34$d %33$d %32$d %31$d "
+                "%30$d %29$d %28$d %27$d %26$d %25$d %24$d %23$d %22$d %21$d "
+                "%20$d %19$d %18$d %17$d %16$d %15$d %14$d %13$d %12$d %11$d "
+                "%10$d %9$d %8$d %7$d %6$d %5$d %4$d %3$d %2$d %1$d",
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+                35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+                51, 52, 53, 54, 55, 56, 57, 58, 59, 60));
 }
 
 TEST_F(FormatEntryPointTest, Preparsed) {
@@ -110,6 +168,15 @@
   EXPECT_EQ("=123=", StrFormat(ParsedFormat<'d'>(view), 123));
 }
 
+TEST_F(FormatEntryPointTest, PreparsedWithV) {
+  ParsedFormat<'v'> pc("%v");
+  EXPECT_EQ("123", StrFormat(pc, 123));
+  // rvalue ok?
+  EXPECT_EQ("123", StrFormat(ParsedFormat<'v'>("%v"), 123));
+  constexpr absl::string_view view("=%v=", 4);
+  EXPECT_EQ("=123=", StrFormat(ParsedFormat<'v'>(view), 123));
+}
+
 TEST_F(FormatEntryPointTest, FormatCountCapture) {
   int n = 0;
   EXPECT_EQ("", StrFormat("%n", FormatCountCapture(&n)));
@@ -118,6 +185,14 @@
   EXPECT_EQ(3, n);
 }
 
+TEST_F(FormatEntryPointTest, FormatCountCaptureWithV) {
+  int n = 0;
+  EXPECT_EQ("", StrFormat("%n", FormatCountCapture(&n)));
+  EXPECT_EQ(0, n);
+  EXPECT_EQ("123", StrFormat("%v%n", 123, FormatCountCapture(&n)));
+  EXPECT_EQ(3, n);
+}
+
 TEST_F(FormatEntryPointTest, FormatCountCaptureWrongType) {
   // Should reject int*.
   int n = 0;
@@ -130,6 +205,18 @@
                     absl::MakeSpan(args)));
 }
 
+TEST_F(FormatEntryPointTest, FormatCountCaptureWrongTypeWithV) {
+  // Should reject int*.
+  int n = 0;
+  UntypedFormatSpec format("%v%n");
+  int i = 123, *ip = &n;
+  FormatArgImpl args[2] = {FormatArgImpl(i), FormatArgImpl(ip)};
+
+  EXPECT_EQ("", str_format_internal::FormatPack(
+                    str_format_internal::UntypedFormatSpecImpl::Extract(format),
+                    absl::MakeSpan(args)));
+}
+
 TEST_F(FormatEntryPointTest, FormatCountCaptureMultiple) {
   int n1 = 0;
   int n2 = 0;
@@ -152,6 +239,21 @@
       s);
 }
 
+TEST_F(FormatEntryPointTest, FormatCountCaptureExampleWithV) {
+  int n;
+  std::string s;
+  std::string a1 = "(1,1)";
+  std::string a2 = "(1,2)";
+  std::string a3 = "(2,2)";
+  StrAppendFormat(&s, "%v: %n%v\n", a1, FormatCountCapture(&n), a2);
+  StrAppendFormat(&s, "%*s%v\n", n, "", a3);
+  EXPECT_EQ(7, n);
+  EXPECT_EQ(
+      "(1,1): (1,2)\n"
+      "       (2,2)\n",
+      s);
+}
+
 TEST_F(FormatEntryPointTest, Stream) {
   const std::string formats[] = {
     "",
@@ -170,7 +272,7 @@
     std::ostringstream oss;
     oss << StreamFormat(*parsed, 123, 3, 49, "multistreaming!!!", 1.01, 1.01);
     int fmt_result = snprintf(&*buf.begin(), buf.size(), fmt.c_str(),  //
-                                 123, 3, 49, "multistreaming!!!", 1.01, 1.01);
+                              123, 3, 49, "multistreaming!!!", 1.01, 1.01);
     ASSERT_TRUE(oss) << fmt;
     ASSERT_TRUE(fmt_result >= 0 && static_cast<size_t>(fmt_result) < buf.size())
         << fmt_result;
@@ -178,6 +280,36 @@
   }
 }
 
+TEST_F(FormatEntryPointTest, StreamWithV) {
+  const std::string formats[] = {
+      "",
+      "a",
+      "%v %u %c %v %f %v",
+  };
+
+  const std::string formats_for_buf[] = {
+      "",
+      "a",
+      "%d %u %c %s %f %g",
+  };
+
+  std::string buf(4096, '\0');
+  for (auto i = 0; i < ABSL_ARRAYSIZE(formats); ++i) {
+    const auto parsed =
+        ParsedFormat<'v', 'u', 'c', 'v', 'f', 'v'>::NewAllowIgnored(formats[i]);
+    std::ostringstream oss;
+    oss << StreamFormat(*parsed, 123, 3, 49,
+                        absl::string_view("multistreaming!!!"), 1.01, 1.01);
+    int fmt_result =
+        snprintf(&*buf.begin(), buf.size(), formats_for_buf[i].c_str(),  //
+                 123, 3, 49, "multistreaming!!!", 1.01, 1.01);
+    ASSERT_TRUE(oss) << formats[i];
+    ASSERT_TRUE(fmt_result >= 0 && static_cast<size_t>(fmt_result) < buf.size())
+        << fmt_result;
+    EXPECT_EQ(buf.c_str(), oss.str());
+  }
+}
+
 TEST_F(FormatEntryPointTest, StreamOk) {
   std::ostringstream oss;
   oss << StreamFormat("hello %d", 123);
@@ -185,6 +317,13 @@
   EXPECT_TRUE(oss.good());
 }
 
+TEST_F(FormatEntryPointTest, StreamOkWithV) {
+  std::ostringstream oss;
+  oss << StreamFormat("hello %v", 123);
+  EXPECT_EQ("hello 123", oss.str());
+  EXPECT_TRUE(oss.good());
+}
+
 TEST_F(FormatEntryPointTest, StreamFail) {
   std::ostringstream oss;
   UntypedFormatSpec format("hello %d");
@@ -195,6 +334,16 @@
   EXPECT_TRUE(oss.fail());
 }
 
+TEST_F(FormatEntryPointTest, StreamFailWithV) {
+  std::ostringstream oss;
+  UntypedFormatSpec format("hello %v");
+  FormatArgImpl arg("non-numeric");
+  oss << str_format_internal::Streamable(
+      str_format_internal::UntypedFormatSpecImpl::Extract(format), {&arg, 1});
+  EXPECT_EQ("hello ", oss.str());  // partial write
+  EXPECT_TRUE(oss.fail());
+}
+
 std::string WithSnprintf(const char* fmt, ...) {
   std::string buf;
   buf.resize(128);
@@ -236,6 +385,12 @@
   EXPECT_EQ("123", StrFormat("%s", FormatStreamed(StreamFormat("%d", 123))));
 }
 
+TEST_F(FormatEntryPointTest, FormatStreamedWithV) {
+  EXPECT_EQ("123", StrFormat("%v", FormatStreamed(123)));
+  EXPECT_EQ("X", StrFormat("%v", FormatStreamed(streamed_test::X())));
+  EXPECT_EQ("123", StrFormat("%v", FormatStreamed(StreamFormat("%d", 123))));
+}
+
 // Helper class that creates a temporary file and exposes a FILE* to it.
 // It will close the file on destruction.
 class TempFile {
@@ -271,6 +426,14 @@
   EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019");
 }
 
+TEST_F(FormatEntryPointTest, FPrintFWithV) {
+  TempFile tmp;
+  int result =
+      FPrintF(tmp.file(), "STRING: %v NUMBER: %010d", std::string("ABC"), -19);
+  EXPECT_EQ(result, 30);
+  EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019");
+}
+
 TEST_F(FormatEntryPointTest, FPrintFError) {
   errno = 0;
   int result = FPrintF(stdin, "ABC");
@@ -305,6 +468,23 @@
   EXPECT_EQ(result, 30);
   EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019");
 }
+
+TEST_F(FormatEntryPointTest, PrintFWithV) {
+  int stdout_tmp = dup(STDOUT_FILENO);
+
+  TempFile tmp;
+  std::fflush(stdout);
+  dup2(fileno(tmp.file()), STDOUT_FILENO);
+
+  int result = PrintF("STRING: %v NUMBER: %010d", std::string("ABC"), -19);
+
+  std::fflush(stdout);
+  dup2(stdout_tmp, STDOUT_FILENO);
+  close(stdout_tmp);
+
+  EXPECT_EQ(result, 30);
+  EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019");
+}
 #endif  // __GLIBC__
 
 TEST_F(FormatEntryPointTest, SNPrintF) {
@@ -334,9 +514,41 @@
   EXPECT_EQ(result, 37);
 }
 
+TEST_F(FormatEntryPointTest, SNPrintFWithV) {
+  char buffer[16];
+  int result =
+      SNPrintF(buffer, sizeof(buffer), "STRING: %v", std::string("ABC"));
+  EXPECT_EQ(result, 11);
+  EXPECT_EQ(std::string(buffer), "STRING: ABC");
+
+  result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 123456);
+  EXPECT_EQ(result, 14);
+  EXPECT_EQ(std::string(buffer), "NUMBER: 123456");
+
+  result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 1234567);
+  EXPECT_EQ(result, 15);
+  EXPECT_EQ(std::string(buffer), "NUMBER: 1234567");
+
+  result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 12345678);
+  EXPECT_EQ(result, 16);
+  EXPECT_EQ(std::string(buffer), "NUMBER: 1234567");
+
+  result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 123456789);
+  EXPECT_EQ(result, 17);
+  EXPECT_EQ(std::string(buffer), "NUMBER: 1234567");
+
+  std::string size = "size";
+
+  result = SNPrintF(nullptr, 0, "Just checking the %v of the output.", size);
+  EXPECT_EQ(result, 37);
+}
+
 TEST(StrFormat, BehavesAsDocumented) {
   std::string s = absl::StrFormat("%s, %d!", "Hello", 123);
   EXPECT_EQ("Hello, 123!", s);
+  std::string hello = "Hello";
+  std::string s2 = absl::StrFormat("%v, %v!", hello, 123);
+  EXPECT_EQ("Hello, 123!", s2);
   // The format of a replacement is
   // '%'[position][flags][width['.'precision]][length_modifier][format]
   EXPECT_EQ(absl::StrFormat("%1$+3.2Lf", 1.1), "+1.10");
@@ -351,22 +563,31 @@
   //     "s" - string       Eg: "C" -> "C", std::string("C++") -> "C++"
   //           Formats std::string, char*, string_view, and Cord.
   EXPECT_EQ(StrFormat("%s", "C"), "C");
+  EXPECT_EQ(StrFormat("%v", std::string("C")), "C");
   EXPECT_EQ(StrFormat("%s", std::string("C++")), "C++");
+  EXPECT_EQ(StrFormat("%v", std::string("C++")), "C++");
   EXPECT_EQ(StrFormat("%s", string_view("view")), "view");
+  EXPECT_EQ(StrFormat("%v", string_view("view")), "view");
   EXPECT_EQ(StrFormat("%s", absl::Cord("cord")), "cord");
+  EXPECT_EQ(StrFormat("%v", absl::Cord("cord")), "cord");
   // Integral Conversion
   //     These format integral types: char, int, long, uint64_t, etc.
   EXPECT_EQ(StrFormat("%d", char{10}), "10");
   EXPECT_EQ(StrFormat("%d", int{10}), "10");
   EXPECT_EQ(StrFormat("%d", long{10}), "10");  // NOLINT
   EXPECT_EQ(StrFormat("%d", uint64_t{10}), "10");
+  EXPECT_EQ(StrFormat("%v", int{10}), "10");
+  EXPECT_EQ(StrFormat("%v", long{10}), "10");  // NOLINT
+  EXPECT_EQ(StrFormat("%v", uint64_t{10}), "10");
   //     d,i - signed decimal          Eg: -10 -> "-10"
   EXPECT_EQ(StrFormat("%d", -10), "-10");
   EXPECT_EQ(StrFormat("%i", -10), "-10");
+  EXPECT_EQ(StrFormat("%v", -10), "-10");
   //      o  - octal                   Eg:  10 -> "12"
   EXPECT_EQ(StrFormat("%o", 10), "12");
   //      u  - unsigned decimal        Eg:  10 -> "10"
   EXPECT_EQ(StrFormat("%u", 10), "10");
+  EXPECT_EQ(StrFormat("%v", 10), "10");
   //     x/X - lower,upper case hex    Eg:  10 -> "a"/"A"
   EXPECT_EQ(StrFormat("%x", 10), "a");
   EXPECT_EQ(StrFormat("%X", 10), "A");
@@ -391,6 +612,8 @@
   EXPECT_EQ(StrFormat("%g", .01), "0.01");
   EXPECT_EQ(StrFormat("%g", 1e10), "1e+10");
   EXPECT_EQ(StrFormat("%G", 1e10), "1E+10");
+  EXPECT_EQ(StrFormat("%v", .01), "0.01");
+  EXPECT_EQ(StrFormat("%v", 1e10), "1e+10");
   //     a/A - lower,upper case hex    Eg: -3.0 -> "-0x1.8p+1"/"-0X1.8P+1"
 
 // On Android platform <=21, there is a regression in hexfloat formatting.
@@ -415,6 +638,8 @@
   EXPECT_EQ(StrFormat("%#o", 10), "012");
   EXPECT_EQ(StrFormat("%#x", 15), "0xf");
   EXPECT_EQ(StrFormat("%04d", 8), "0008");
+  EXPECT_EQ(StrFormat("%#04x", 0), "0000");
+  EXPECT_EQ(StrFormat("%#04x", 1), "0x01");
   // Posix positional substitution.
   EXPECT_EQ(absl::StrFormat("%2$s, %3$s, %1$s!", "vici", "veni", "vidi"),
             "veni, vidi, vici!");
@@ -428,6 +653,11 @@
   EXPECT_EQ(StrFormat("%zd", int{1}), "1");
   EXPECT_EQ(StrFormat("%td", int{1}), "1");
   EXPECT_EQ(StrFormat("%qd", int{1}), "1");
+
+  // Bool is handled correctly depending on whether %v is used
+  EXPECT_EQ(StrFormat("%v", true), "true");
+  EXPECT_EQ(StrFormat("%v", false), "false");
+  EXPECT_EQ(StrFormat("%d", true), "1");
 }
 
 using str_format_internal::ExtendedParsedFormat;
@@ -477,6 +707,15 @@
             SummarizeParsedFormat(ParsedFormat<'s', '*', 'd'>("%s %.*d")));
 }
 
+TEST_F(ParsedFormatTest, SimpleCheckedWithV) {
+  EXPECT_EQ("[ABC]{v:1$v}[DEF]",
+            SummarizeParsedFormat(ParsedFormat<'v'>("ABC%vDEF")));
+  EXPECT_EQ("{v:1$v}[FFF]{v:2$v}[ZZZ]{f:3$f}",
+            SummarizeParsedFormat(ParsedFormat<'v', 'v', 'f'>("%vFFF%vZZZ%f")));
+  EXPECT_EQ("{v:1$v}[ ]{.*d:3$.2$*d}",
+            SummarizeParsedFormat(ParsedFormat<'v', '*', 'd'>("%v %.*d")));
+}
+
 TEST_F(ParsedFormatTest, SimpleUncheckedCorrect) {
   auto f = ParsedFormat<'d'>::New("ABC%dDEF");
   ASSERT_TRUE(f);
@@ -507,6 +746,23 @@
             SummarizeParsedFormat(*dollar));
 }
 
+TEST_F(ParsedFormatTest, SimpleUncheckedCorrectWithV) {
+  auto f = ParsedFormat<'v'>::New("ABC%vDEF");
+  ASSERT_TRUE(f);
+  EXPECT_EQ("[ABC]{v:1$v}[DEF]", SummarizeParsedFormat(*f));
+
+  std::string format = "%vFFF%vZZZ%f";
+  auto f2 = ParsedFormat<'v', 'v', 'f'>::New(format);
+
+  ASSERT_TRUE(f2);
+  EXPECT_EQ("{v:1$v}[FFF]{v:2$v}[ZZZ]{f:3$f}", SummarizeParsedFormat(*f2));
+
+  f2 = ParsedFormat<'v', 'v', 'f'>::New("%v %v %f");
+
+  ASSERT_TRUE(f2);
+  EXPECT_EQ("{v:1$v}[ ]{v:2$v}[ ]{f:3$f}", SummarizeParsedFormat(*f2));
+}
+
 TEST_F(ParsedFormatTest, SimpleUncheckedIgnoredArgs) {
   EXPECT_FALSE((ParsedFormat<'d', 's'>::New("ABC")));
   EXPECT_FALSE((ParsedFormat<'d', 's'>::New("%dABC")));
@@ -522,6 +778,18 @@
   EXPECT_EQ("[ABC]{2$s:2$s}", SummarizeParsedFormat(*f));
 }
 
+TEST_F(ParsedFormatTest, SimpleUncheckedIgnoredArgsWithV) {
+  EXPECT_FALSE((ParsedFormat<'v', 'v'>::New("ABC")));
+  EXPECT_FALSE((ParsedFormat<'v', 'v'>::New("%vABC")));
+  EXPECT_FALSE((ParsedFormat<'v', 's'>::New("ABC%2$s")));
+  auto f = ParsedFormat<'v', 'v'>::NewAllowIgnored("ABC");
+  ASSERT_TRUE(f);
+  EXPECT_EQ("[ABC]", SummarizeParsedFormat(*f));
+  f = ParsedFormat<'v', 'v'>::NewAllowIgnored("%vABC");
+  ASSERT_TRUE(f);
+  EXPECT_EQ("{v:1$v}[ABC]", SummarizeParsedFormat(*f));
+}
+
 TEST_F(ParsedFormatTest, SimpleUncheckedUnsupported) {
   EXPECT_FALSE(ParsedFormat<'d'>::New("%1$d %1$x"));
   EXPECT_FALSE(ParsedFormat<'x'>::New("%1$d %1$x"));
@@ -536,6 +804,15 @@
   EXPECT_FALSE((ParsedFormat<'s', 'd', 'g'>::New(format)));
 }
 
+TEST_F(ParsedFormatTest, SimpleUncheckedIncorrectWithV) {
+  EXPECT_FALSE(ParsedFormat<'v'>::New(""));
+
+  EXPECT_FALSE(ParsedFormat<'v'>::New("ABC%vDEF%v"));
+
+  std::string format = "%vFFF%vZZZ%f";
+  EXPECT_FALSE((ParsedFormat<'v', 'v', 'g'>::New(format)));
+}
+
 #if defined(__cpp_nontype_template_parameter_auto)
 
 template <auto T>
@@ -582,6 +859,23 @@
                          's'>::New("%s%s");
   ASSERT_TRUE(v4);
 }
+
+TEST_F(ParsedFormatTest, ExtendedTypingWithV) {
+  EXPECT_FALSE(ParsedFormat<FormatConversionCharSet::v>::New(""));
+  ASSERT_TRUE(ParsedFormat<absl::FormatConversionCharSet::v>::New("%v"));
+  auto v1 = ParsedFormat<'v', absl::FormatConversionCharSet::v>::New("%v%v");
+  ASSERT_TRUE(v1);
+  auto v2 = ParsedFormat<absl::FormatConversionCharSet::v, 'v'>::New("%v%v");
+  ASSERT_TRUE(v2);
+  auto v3 = ParsedFormat<absl::FormatConversionCharSet::v |
+                             absl::FormatConversionCharSet::v,
+                         'v'>::New("%v%v");
+  ASSERT_TRUE(v3);
+  auto v4 = ParsedFormat<absl::FormatConversionCharSet::v |
+                             absl::FormatConversionCharSet::v,
+                         'v'>::New("%v%v");
+  ASSERT_TRUE(v4);
+}
 #endif
 
 TEST_F(ParsedFormatTest, UncheckedCorrect) {
@@ -625,6 +919,28 @@
             SummarizeParsedFormat(*dollar));
 }
 
+TEST_F(ParsedFormatTest, UncheckedCorrectWithV) {
+  auto f =
+      ExtendedParsedFormat<absl::FormatConversionCharSet::v>::New("ABC%vDEF");
+  ASSERT_TRUE(f);
+  EXPECT_EQ("[ABC]{v:1$v}[DEF]", SummarizeParsedFormat(*f));
+
+  std::string format = "%vFFF%vZZZ%f";
+  auto f2 = ExtendedParsedFormat<
+      absl::FormatConversionCharSet::v, absl::FormatConversionCharSet::v,
+      absl::FormatConversionCharSet::kFloating>::New(format);
+
+  ASSERT_TRUE(f2);
+  EXPECT_EQ("{v:1$v}[FFF]{v:2$v}[ZZZ]{f:3$f}", SummarizeParsedFormat(*f2));
+
+  f2 = ExtendedParsedFormat<
+      absl::FormatConversionCharSet::v, absl::FormatConversionCharSet::v,
+      absl::FormatConversionCharSet::kFloating>::New("%v %v %f");
+
+  ASSERT_TRUE(f2);
+  EXPECT_EQ("{v:1$v}[ ]{v:2$v}[ ]{f:3$f}", SummarizeParsedFormat(*f2));
+}
+
 TEST_F(ParsedFormatTest, UncheckedIgnoredArgs) {
   EXPECT_FALSE(
       (ExtendedParsedFormat<absl::FormatConversionCharSet::d,
@@ -652,6 +968,28 @@
   EXPECT_EQ("[ABC]{2$s:2$s}", SummarizeParsedFormat(*f));
 }
 
+TEST_F(ParsedFormatTest, UncheckedIgnoredArgsWithV) {
+  EXPECT_FALSE(
+      (ExtendedParsedFormat<absl::FormatConversionCharSet::v,
+                            absl::FormatConversionCharSet::v>::New("ABC")));
+  EXPECT_FALSE(
+      (ExtendedParsedFormat<absl::FormatConversionCharSet::v,
+                            absl::FormatConversionCharSet::v>::New("%vABC")));
+  EXPECT_FALSE((ExtendedParsedFormat<absl::FormatConversionCharSet::v,
+                                     absl::FormatConversionCharSet::s>::
+                    New("ABC%2$s")));
+  auto f = ExtendedParsedFormat<
+      absl::FormatConversionCharSet::v,
+      absl::FormatConversionCharSet::v>::NewAllowIgnored("ABC");
+  ASSERT_TRUE(f);
+  EXPECT_EQ("[ABC]", SummarizeParsedFormat(*f));
+  f = ExtendedParsedFormat<
+      absl::FormatConversionCharSet::v,
+      absl::FormatConversionCharSet::v>::NewAllowIgnored("%vABC");
+  ASSERT_TRUE(f);
+  EXPECT_EQ("{v:1$v}[ABC]", SummarizeParsedFormat(*f));
+}
+
 TEST_F(ParsedFormatTest, UncheckedMultipleTypes) {
   auto dx =
       ExtendedParsedFormat<absl::FormatConversionCharSet::d |
@@ -678,12 +1016,35 @@
                             absl::FormatConversionCharSet::g>::New(format)));
 }
 
+TEST_F(ParsedFormatTest, UncheckedIncorrectWithV) {
+  EXPECT_FALSE(ExtendedParsedFormat<absl::FormatConversionCharSet::v>::New(""));
+
+  EXPECT_FALSE(ExtendedParsedFormat<absl::FormatConversionCharSet::v>::New(
+      "ABC%vDEF%v"));
+
+  std::string format = "%vFFF%vZZZ%f";
+  EXPECT_FALSE(
+      (ExtendedParsedFormat<absl::FormatConversionCharSet::v,
+                            absl::FormatConversionCharSet::g>::New(format)));
+}
+
 TEST_F(ParsedFormatTest, RegressionMixPositional) {
   EXPECT_FALSE(
       (ExtendedParsedFormat<absl::FormatConversionCharSet::d,
                             absl::FormatConversionCharSet::o>::New("%1$d %o")));
 }
 
+TEST_F(ParsedFormatTest, DisallowModifiersWithV) {
+  auto f = ParsedFormat<'v'>::New("ABC%80vDEF");
+  EXPECT_EQ(f, nullptr);
+
+  f = ParsedFormat<'v'>::New("ABC%0vDEF");
+  EXPECT_EQ(f, nullptr);
+
+  f = ParsedFormat<'v'>::New("ABC%.1vDEF");
+  EXPECT_EQ(f, nullptr);
+}
+
 using FormatWrapperTest = ::testing::Test;
 
 // Plain wrapper for StrFormat.
@@ -697,20 +1058,33 @@
   EXPECT_EQ(WrappedFormat("%s there", "hello"), "hello there");
 }
 
+TEST_F(FormatWrapperTest, ConstexprStringFormatWithV) {
+  std::string hello = "hello";
+  EXPECT_EQ(WrappedFormat("%v there", hello), "hello there");
+}
+
 TEST_F(FormatWrapperTest, ParsedFormat) {
   ParsedFormat<'s'> format("%s there");
   EXPECT_EQ(WrappedFormat(format, "hello"), "hello there");
 }
 
+TEST_F(FormatWrapperTest, ParsedFormatWithV) {
+  std::string hello = "hello";
+  ParsedFormat<'v'> format("%v there");
+  EXPECT_EQ(WrappedFormat(format, hello), "hello there");
+}
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl
 
+namespace {
 using FormatExtensionTest = ::testing::Test;
 
 struct Point {
   friend absl::FormatConvertResult<absl::FormatConversionCharSet::kString |
-                                   absl::FormatConversionCharSet::kIntegral>
+                                   absl::FormatConversionCharSet::kIntegral |
+                                   absl::FormatConversionCharSet::v>
   AbslFormatConvert(const Point& p, const absl::FormatConversionSpec& spec,
                     absl::FormatSink* s) {
     if (spec.conversion_char() == absl::FormatConversionChar::s) {
@@ -729,6 +1103,7 @@
   Point p;
   EXPECT_EQ(absl::StrFormat("a %s z", p), "a x=10 y=20 z");
   EXPECT_EQ(absl::StrFormat("a %d z", p), "a 10,20 z");
+  EXPECT_EQ(absl::StrFormat("a %v z", p), "a 10,20 z");
 
   // Typed formatting will fail to compile an invalid format.
   // StrFormat("%f", p);  // Does not compile.
@@ -738,6 +1113,84 @@
   EXPECT_FALSE(absl::FormatUntyped(&actual, f1, {absl::FormatArg(p)}));
 }
 
+struct PointStringify {
+  template <typename FormatSink>
+  friend void AbslStringify(FormatSink& sink, const PointStringify& p) {
+    sink.Append(absl::StrCat("(", p.x, ", ", p.y, ")"));
+  }
+
+  double x = 10.0;
+  double y = 20.0;
+};
+
+TEST_F(FormatExtensionTest, AbslStringifyExample) {
+  PointStringify p;
+  EXPECT_EQ(absl::StrFormat("a %v z", p), "a (10, 20) z");
+}
+
+struct PointStringifyUsingFormat {
+  template <typename FormatSink>
+  friend void AbslStringify(FormatSink& sink,
+                            const PointStringifyUsingFormat& p) {
+    absl::Format(&sink, "(%g, %g)", p.x, p.y);
+  }
+
+  double x = 10.0;
+  double y = 20.0;
+};
+
+TEST_F(FormatExtensionTest, AbslStringifyExampleUsingFormat) {
+  PointStringifyUsingFormat p;
+  EXPECT_EQ(absl::StrFormat("a %v z", p), "a (10, 20) z");
+}
+
+enum class EnumClassWithStringify { Many = 0, Choices = 1 };
+
+template <typename Sink>
+void AbslStringify(Sink& sink, EnumClassWithStringify e) {
+  absl::Format(&sink, "%s",
+               e == EnumClassWithStringify::Many ? "Many" : "Choices");
+}
+
+enum EnumWithStringify { Many, Choices };
+
+template <typename Sink>
+void AbslStringify(Sink& sink, EnumWithStringify e) {
+  absl::Format(&sink, "%s", e == EnumWithStringify::Many ? "Many" : "Choices");
+}
+
+TEST_F(FormatExtensionTest, AbslStringifyWithEnumWithV) {
+  const auto e_class = EnumClassWithStringify::Choices;
+  EXPECT_EQ(absl::StrFormat("My choice is %v", e_class),
+            "My choice is Choices");
+
+  const auto e = EnumWithStringify::Choices;
+  EXPECT_EQ(absl::StrFormat("My choice is %v", e), "My choice is Choices");
+}
+
+TEST_F(FormatExtensionTest, AbslStringifyEnumWithD) {
+  const auto e_class = EnumClassWithStringify::Many;
+  EXPECT_EQ(absl::StrFormat("My choice is %d", e_class), "My choice is 0");
+
+  const auto e = EnumWithStringify::Choices;
+  EXPECT_EQ(absl::StrFormat("My choice is %d", e), "My choice is 1");
+}
+
+enum class EnumWithLargerValue { x = 32 };
+
+template <typename Sink>
+void AbslStringify(Sink& sink, EnumWithLargerValue e) {
+  absl::Format(&sink, "%s", "Many");
+}
+
+TEST_F(FormatExtensionTest, AbslStringifyEnumOtherSpecifiers) {
+  const auto e = EnumWithLargerValue::x;
+  EXPECT_EQ(absl::StrFormat("My choice is %g", e), "My choice is 32");
+  EXPECT_EQ(absl::StrFormat("My choice is %x", e), "My choice is 20");
+}
+
+}  // namespace
+
 // Some codegen thunks that we can use to easily dump the generated assembly for
 // different StrFormat calls.
 
diff --git a/abseil-cpp/absl/strings/str_join.h b/abseil-cpp/absl/strings/str_join.h
index ae5731a..ee5ae7e 100644
--- a/abseil-cpp/absl/strings/str_join.h
+++ b/abseil-cpp/absl/strings/str_join.h
@@ -72,21 +72,15 @@
 // functions. You may provide your own Formatter to enable `absl::StrJoin()` to
 // work with arbitrary types.
 //
-// The following is an example of a custom Formatter that simply uses
-// `std::to_string()` to format an integer as a std::string.
+// The following is an example of a custom Formatter that uses
+// `absl::FormatDuration` to join a list of `absl::Duration`s.
 //
-//   struct MyFormatter {
-//     void operator()(std::string* out, int i) const {
-//       out->append(std::to_string(i));
-//     }
-//   };
-//
-// You would use the above formatter by passing an instance of it as the final
-// argument to `absl::StrJoin()`:
-//
-//   std::vector<int> v = {1, 2, 3, 4};
-//   std::string s = absl::StrJoin(v, "-", MyFormatter());
-//   EXPECT_EQ("1-2-3-4", s);
+//   std::vector<absl::Duration> v = {absl::Seconds(1), absl::Milliseconds(10)};
+//   std::string s =
+//       absl::StrJoin(v, ", ", [](std::string* out, absl::Duration dur) {
+//         absl::StrAppend(out, absl::FormatDuration(dur));
+//       });
+//   EXPECT_EQ("1s, 10ms", s);
 //
 // The following standard formatters are provided within this file:
 //
@@ -144,7 +138,7 @@
       std::forward<Formatter>(f));
 }
 
-// Function overload of `DererefenceFormatter()` for using a default
+// Function overload of `DereferenceFormatter()` for using a default
 // `AlphaNumFormatter()`.
 inline strings_internal::DereferenceFormatterImpl<
     strings_internal::AlphaNumFormatterImpl>
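The str_join.h hunks above rewrite the custom-formatter example and fix the DereferenceFormatter() typo. A short standalone sketch of both formatter paths (the default AlphaNumFormatter used when no formatter is passed, and DereferenceFormatter for pointer-like elements); main() and the values are illustrative:

    #include <iostream>
    #include <memory>
    #include <vector>

    #include "absl/strings/str_join.h"

    int main() {
      // Default formatter: AlphaNum-compatible elements join directly.
      std::vector<int> v = {1, 2, 3, 4};
      std::cout << absl::StrJoin(v, "-") << "\n";  // 1-2-3-4

      // DereferenceFormatter: join what pointer-like elements point to.
      std::vector<std::unique_ptr<int>> p;
      p.push_back(std::make_unique<int>(1));
      p.push_back(std::make_unique<int>(2));
      std::cout << absl::StrJoin(p, "-", absl::DereferenceFormatter()) << "\n";  // 1-2
    }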
diff --git a/abseil-cpp/absl/strings/str_join_test.cc b/abseil-cpp/absl/strings/str_join_test.cc
index 2be6256..c986e86 100644
--- a/abseil-cpp/absl/strings/str_join_test.cc
+++ b/abseil-cpp/absl/strings/str_join_test.cc
@@ -21,6 +21,7 @@
 #include <cstdio>
 #include <functional>
 #include <initializer_list>
+#include <iterator>
 #include <map>
 #include <memory>
 #include <ostream>
@@ -33,6 +34,7 @@
 #include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
 
 namespace {
 
@@ -471,4 +473,136 @@
                           "-", absl::DereferenceFormatter(TestFormatter())));
 }
 
+// A minimal value type for `StrJoin` inputs.
+// Used to ensure we do not excessively require a more specific type, such as a
+// `string_view`.
+//
+// Anything that provides `data()` and `size()` is OK.
+class TestValue {
+ public:
+  TestValue(const char* data, size_t size) : data_(data), size_(size) {}
+  const char* data() const { return data_; }
+  size_t size() const { return size_; }
+
+ private:
+  const char* data_;
+  size_t size_;
+};
+
+// A minimal C++20 forward iterator, used to test that we do not impose
+// excessive requirements on StrJoin inputs.
+//
+// The 2 main differences between pre-C++20 LegacyForwardIterator and the
+// C++20 ForwardIterator are:
+// 1. `operator->` is not required in C++20.
+// 2. `operator*` result does not need to be an lvalue (a reference).
+//
+// The `operator->` requirement was removed on page 17 in:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1037r0.pdf
+//
+// See the `[iterator.requirements]` section of the C++ standard.
+//
+// The value type is a template parameter so that we can test the behavior
+// of `StrJoin` specializations, e.g. the NoFormatter specialization for
+// `string_view`.
+template <typename ValueT>
+class TestIterator {
+ public:
+  using iterator_category = std::forward_iterator_tag;
+  using value_type = ValueT;
+  using pointer = void;
+  using reference = const value_type&;
+  using difference_type = int;
+
+  // `data` must outlive the result.
+  static TestIterator begin(const std::vector<absl::string_view>& data) {
+    return TestIterator(&data, 0);
+  }
+
+  static TestIterator end(const std::vector<absl::string_view>& data) {
+    return TestIterator(nullptr, data.size());
+  }
+
+  bool operator==(const TestIterator& other) const {
+    return pos_ == other.pos_;
+  }
+  bool operator!=(const TestIterator& other) const {
+    return pos_ != other.pos_;
+  }
+
+  // This deliberately returns a `prvalue`.
+  // The requirement to return a reference was removed in C++20.
+  value_type operator*() const {
+    return ValueT((*data_)[pos_].data(), (*data_)[pos_].size());
+  }
+
+  // `operator->()` is deliberately omitted.
+  // The requirement to provide it was removed in C++20.
+
+  TestIterator& operator++() {
+    ++pos_;
+    return *this;
+  }
+
+  TestIterator operator++(int) {
+    TestIterator result = *this;
+    ++(*this);
+    return result;
+  }
+
+  TestIterator& operator--() {
+    --pos_;
+    return *this;
+  }
+
+  TestIterator operator--(int) {
+    TestIterator result = *this;
+    --(*this);
+    return result;
+  }
+
+ private:
+  TestIterator(const std::vector<absl::string_view>* data, size_t pos)
+      : data_(data), pos_(pos) {}
+
+  const std::vector<absl::string_view>* data_;
+  size_t pos_;
+};
+
+template <typename ValueT>
+class TestIteratorRange {
+ public:
+  // `data` must be non-null and must outlive the result.
+  explicit TestIteratorRange(const std::vector<absl::string_view>& data)
+      : begin_(TestIterator<ValueT>::begin(data)),
+        end_(TestIterator<ValueT>::end(data)) {}
+
+  const TestIterator<ValueT>& begin() const { return begin_; }
+  const TestIterator<ValueT>& end() const { return end_; }
+
+ private:
+  TestIterator<ValueT> begin_;
+  TestIterator<ValueT> end_;
+};
+
+TEST(StrJoin, TestIteratorRequirementsNoFormatter) {
+  const std::vector<absl::string_view> a = {"a", "b", "c"};
+
+  // When the value type is string-like (`std::string` or `string_view`),
+  // the NoFormatter template specialization is used internally.
+  EXPECT_EQ("a-b-c",
+            absl::StrJoin(TestIteratorRange<absl::string_view>(a), "-"));
+}
+
+TEST(StrJoin, TestIteratorRequirementsCustomFormatter) {
+  const std::vector<absl::string_view> a = {"a", "b", "c"};
+  EXPECT_EQ("a-b-c",
+            absl::StrJoin(TestIteratorRange<TestValue>(a), "-",
+                          [](std::string* out, const TestValue& value) {
+                            absl::StrAppend(
+                                out,
+                                absl::string_view(value.data(), value.size()));
+                          }));
+}
+
 }  // namespace
diff --git a/abseil-cpp/absl/strings/str_split.cc b/abseil-cpp/absl/strings/str_split.cc
index e08c26b..72ba7c0 100644
--- a/abseil-cpp/absl/strings/str_split.cc
+++ b/abseil-cpp/absl/strings/str_split.cc
@@ -60,19 +60,23 @@
 // Finds using absl::string_view::find(), therefore the length of the found
 // delimiter is delimiter.length().
 struct LiteralPolicy {
-  size_t Find(absl::string_view text, absl::string_view delimiter, size_t pos) {
+  static size_t Find(absl::string_view text, absl::string_view delimiter,
+                     size_t pos) {
     return text.find(delimiter, pos);
   }
-  size_t Length(absl::string_view delimiter) { return delimiter.length(); }
+  static size_t Length(absl::string_view delimiter) {
+    return delimiter.length();
+  }
 };
 
 // Finds using absl::string_view::find_first_of(), therefore the length of the
 // found delimiter is 1.
 struct AnyOfPolicy {
-  size_t Find(absl::string_view text, absl::string_view delimiter, size_t pos) {
+  static size_t Find(absl::string_view text, absl::string_view delimiter,
+                     size_t pos) {
     return text.find_first_of(delimiter, pos);
   }
-  size_t Length(absl::string_view /* delimiter */) { return 1; }
+  static size_t Length(absl::string_view /* delimiter */) { return 1; }
 };
 
 }  // namespace
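
These policies back the public `absl::ByString` and `absl::ByAnyChar` delimiters: the former matches the whole delimiter string (match length `delimiter.length()`), the latter matches any single character from the set (match length 1). A minimal standalone sketch of the observable difference (the helper name is illustrative):

#include <cassert>
#include <string>
#include <vector>
#include "absl/strings/str_split.h"

void ByStringVsByAnyChar() {
  // ByString treats ", " as one two-character delimiter.
  std::vector<std::string> a = absl::StrSplit("a, b, c", absl::ByString(", "));
  assert((a == std::vector<std::string>{"a", "b", "c"}));

  // ByAnyChar splits on each ',' or ';' individually.
  std::vector<std::string> b = absl::StrSplit("a,b;c", absl::ByAnyChar(",;"));
  assert((b == std::vector<std::string>{"a", "b", "c"}));
}
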
@@ -123,8 +127,7 @@
   ABSL_RAW_CHECK(length > 0, "");
 }
 
-absl::string_view ByLength::Find(absl::string_view text,
-                                      size_t pos) const {
+absl::string_view ByLength::Find(absl::string_view text, size_t pos) const {
   pos = std::min(pos, text.size());  // truncate `pos`
   absl::string_view substr = text.substr(pos);
   // If the string is shorter than the chunk size we say we
diff --git a/abseil-cpp/absl/strings/str_split.h b/abseil-cpp/absl/strings/str_split.h
index 1ce17f3..7bbb68a 100644
--- a/abseil-cpp/absl/strings/str_split.h
+++ b/abseil-cpp/absl/strings/str_split.h
@@ -369,6 +369,12 @@
   }
 };
 
+template <typename T>
+using EnableSplitIfString =
+    typename std::enable_if<std::is_same<T, std::string>::value ||
+                            std::is_same<T, const std::string>::value,
+                            int>::type;
+
 //------------------------------------------------------------------------------
 //                                  StrSplit()
 //------------------------------------------------------------------------------
@@ -455,8 +461,7 @@
 // first two split strings become the `std::pair` `.first` and `.second`
 // members, respectively. The remaining split substrings are discarded. If there
 // are fewer than two split substrings, the empty string is used for the
-// corresponding
-// `std::pair` member.
+// corresponding `std::pair` member.
 //
 // Example:
 //
@@ -489,22 +494,50 @@
 // Try not to depend on this distinction because the bug may one day be fixed.
 template <typename Delimiter>
 strings_internal::Splitter<
-    typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty>
+    typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
+    absl::string_view>
 StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d) {
   using DelimiterType =
       typename strings_internal::SelectDelimiter<Delimiter>::type;
-  return strings_internal::Splitter<DelimiterType, AllowEmpty>(
+  return strings_internal::Splitter<DelimiterType, AllowEmpty,
+                                    absl::string_view>(
+      text.value(), DelimiterType(d), AllowEmpty());
+}
+
+template <typename Delimiter, typename StringType,
+          EnableSplitIfString<StringType> = 0>
+strings_internal::Splitter<
+    typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
+    std::string>
+StrSplit(StringType&& text, Delimiter d) {
+  using DelimiterType =
+      typename strings_internal::SelectDelimiter<Delimiter>::type;
+  return strings_internal::Splitter<DelimiterType, AllowEmpty, std::string>(
       std::move(text), DelimiterType(d), AllowEmpty());
 }
 
 template <typename Delimiter, typename Predicate>
 strings_internal::Splitter<
-    typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate>
+    typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
+    absl::string_view>
 StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d,
          Predicate p) {
   using DelimiterType =
       typename strings_internal::SelectDelimiter<Delimiter>::type;
-  return strings_internal::Splitter<DelimiterType, Predicate>(
+  return strings_internal::Splitter<DelimiterType, Predicate,
+                                    absl::string_view>(
+      text.value(), DelimiterType(d), std::move(p));
+}
+
+template <typename Delimiter, typename Predicate, typename StringType,
+          EnableSplitIfString<StringType> = 0>
+strings_internal::Splitter<
+    typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
+    std::string>
+StrSplit(StringType&& text, Delimiter d, Predicate p) {
+  using DelimiterType =
+      typename strings_internal::SelectDelimiter<Delimiter>::type;
+  return strings_internal::Splitter<DelimiterType, Predicate, std::string>(
       std::move(text), DelimiterType(d), std::move(p));
 }
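
The new `StringType&&` overloads above let `StrSplit` take ownership of a `std::string` rvalue (note the `std::string` third template argument to `Splitter`), so splitting a temporary no longer leaves the results pointing at destroyed storage. A minimal usage sketch, where `ReadLine()` is a hypothetical function returning a temporary string:

#include <string>
#include <vector>
#include "absl/strings/str_split.h"

std::string ReadLine();  // hypothetical: returns something like "k1=v1&k2=v2"

std::vector<std::string> ParseFields() {
  // The temporary is moved into the Splitter, and the pieces are copied out of
  // storage the Splitter still owns, so nothing dangles.
  return absl::StrSplit(ReadLine(), '&');
}
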
 
diff --git a/abseil-cpp/absl/strings/str_split_test.cc b/abseil-cpp/absl/strings/str_split_test.cc
index b5ce68d..04a64a4 100644
--- a/abseil-cpp/absl/strings/str_split_test.cc
+++ b/abseil-cpp/absl/strings/str_split_test.cc
@@ -29,6 +29,8 @@
 #include "gtest/gtest.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/macros.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
 #include "absl/container/flat_hash_map.h"
 #include "absl/container/node_hash_map.h"
 #include "absl/strings/numbers.h"
@@ -367,7 +369,7 @@
 TEST(Splitter, RangeIterators) {
   auto splitter = absl::StrSplit("a,b,c", ',');
   std::vector<absl::string_view> output;
-  for (const absl::string_view p : splitter) {
+  for (absl::string_view p : splitter) {
     output.push_back(p);
   }
   EXPECT_THAT(output, ElementsAre("a", "b", "c"));
@@ -405,6 +407,10 @@
   TestConversionOperator<std::set<std::string>>(splitter);
   TestConversionOperator<std::multiset<absl::string_view>>(splitter);
   TestConversionOperator<std::multiset<std::string>>(splitter);
+  TestConversionOperator<absl::btree_set<absl::string_view>>(splitter);
+  TestConversionOperator<absl::btree_set<std::string>>(splitter);
+  TestConversionOperator<absl::btree_multiset<absl::string_view>>(splitter);
+  TestConversionOperator<absl::btree_multiset<std::string>>(splitter);
   TestConversionOperator<std::unordered_set<std::string>>(splitter);
 
   // Tests conversion to map-like objects.
@@ -421,6 +427,22 @@
   TestMapConversionOperator<std::multimap<std::string, absl::string_view>>(
       splitter);
   TestMapConversionOperator<std::multimap<std::string, std::string>>(splitter);
+  TestMapConversionOperator<
+      absl::btree_map<absl::string_view, absl::string_view>>(splitter);
+  TestMapConversionOperator<absl::btree_map<absl::string_view, std::string>>(
+      splitter);
+  TestMapConversionOperator<absl::btree_map<std::string, absl::string_view>>(
+      splitter);
+  TestMapConversionOperator<absl::btree_map<std::string, std::string>>(
+      splitter);
+  TestMapConversionOperator<
+      absl::btree_multimap<absl::string_view, absl::string_view>>(splitter);
+  TestMapConversionOperator<
+      absl::btree_multimap<absl::string_view, std::string>>(splitter);
+  TestMapConversionOperator<
+      absl::btree_multimap<std::string, absl::string_view>>(splitter);
+  TestMapConversionOperator<absl::btree_multimap<std::string, std::string>>(
+      splitter);
   TestMapConversionOperator<std::unordered_map<std::string, std::string>>(
       splitter);
   TestMapConversionOperator<
@@ -921,8 +943,14 @@
 }
 
 TEST(Split, WorksWithLargeStrings) {
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
+  constexpr size_t kSize = (uint32_t{1} << 26) + 1;  // 64M + 1 byte
+#else
+  constexpr size_t kSize = (uint32_t{1} << 31) + 1;  // 2G + 1 byte
+#endif
   if (sizeof(size_t) > 4) {
-    std::string s((uint32_t{1} << 31) + 1, 'x');  // 2G + 1 byte
+    std::string s(kSize, 'x');
     s.back() = '-';
     std::vector<absl::string_view> v = absl::StrSplit(s, '-');
     EXPECT_EQ(2, v.size());
diff --git a/abseil-cpp/absl/strings/string_view.cc b/abseil-cpp/absl/strings/string_view.cc
index c5f5de9..f20ff53 100644
--- a/abseil-cpp/absl/strings/string_view.cc
+++ b/abseil-cpp/absl/strings/string_view.cc
@@ -21,18 +21,41 @@
 #include <cstring>
 #include <ostream>
 
-#include "absl/strings/internal/memutil.h"
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
 namespace {
+
+// Scans for the needle's first byte with memchr and confirms each candidate
+// with memcmp. This is significantly faster for case-sensitive matches with
+// very few possible matches.
+const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
+                     size_t neelen) {
+  if (0 == neelen) {
+    return phaystack;  // even if haylen is 0
+  }
+  if (haylen < neelen) return nullptr;
+
+  const char* match;
+  const char* hayend = phaystack + haylen - neelen + 1;
+  // A static cast is used here to work around the fact that memchr returns
+  // a void* on Posix-compliant systems and const void* on Windows.
+  while (
+      (match = static_cast<const char*>(memchr(
+           phaystack, pneedle[0], static_cast<size_t>(hayend - phaystack))))) {
+    if (memcmp(match, pneedle, neelen) == 0)
+      return match;
+    else
+      phaystack = match + 1;
+  }
+  return nullptr;
+}
+
 void WritePadding(std::ostream& o, size_t pad) {
   char fill_buf[32];
   memset(fill_buf, o.fill(), sizeof(fill_buf));
   while (pad) {
     size_t n = std::min(pad, sizeof(fill_buf));
-    o.write(fill_buf, n);
+    o.write(fill_buf, static_cast<std::streamsize>(n));
     pad -= n;
   }
 }
@@ -63,7 +86,7 @@
     size_t lpad = 0;
     size_t rpad = 0;
     if (static_cast<size_t>(o.width()) > piece.size()) {
-      size_t pad = o.width() - piece.size();
+      size_t pad = static_cast<size_t>(o.width()) - piece.size();
       if ((o.flags() & o.adjustfield) == o.left) {
         rpad = pad;
       } else {
@@ -71,22 +94,21 @@
       }
     }
     if (lpad) WritePadding(o, lpad);
-    o.write(piece.data(), piece.size());
+    o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
     if (rpad) WritePadding(o, rpad);
     o.width(0);
   }
   return o;
 }
 
-string_view::size_type string_view::find(string_view s, size_type pos) const
-    noexcept {
+string_view::size_type string_view::find(string_view s,
+                                         size_type pos) const noexcept {
   if (empty() || pos > length_) {
     if (empty() && pos == 0 && s.empty()) return 0;
     return npos;
   }
-  const char* result =
-      strings_internal::memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
-  return result ? result - ptr_ : npos;
+  const char* result = memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
+  return result ? static_cast<size_type>(result - ptr_) : npos;
 }
 
 string_view::size_type string_view::find(char c, size_type pos) const noexcept {
@@ -95,21 +117,21 @@
   }
   const char* result =
       static_cast<const char*>(memchr(ptr_ + pos, c, length_ - pos));
-  return result != nullptr ? result - ptr_ : npos;
+  return result != nullptr ? static_cast<size_type>(result - ptr_) : npos;
 }
 
-string_view::size_type string_view::rfind(string_view s, size_type pos) const
-    noexcept {
+string_view::size_type string_view::rfind(string_view s,
+                                          size_type pos) const noexcept {
   if (length_ < s.length_) return npos;
   if (s.empty()) return std::min(length_, pos);
   const char* last = ptr_ + std::min(length_ - s.length_, pos) + s.length_;
   const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_);
-  return result != last ? result - ptr_ : npos;
+  return result != last ? static_cast<size_type>(result - ptr_) : npos;
 }
 
 // Search range is [0..pos] inclusive.  If pos == npos, search everything.
-string_view::size_type string_view::rfind(char c, size_type pos) const
-    noexcept {
+string_view::size_type string_view::rfind(char c,
+                                          size_type pos) const noexcept {
   // Note: memrchr() is not available on Windows.
   if (empty()) return npos;
   for (size_type i = std::min(pos, length_ - 1);; --i) {
@@ -121,9 +143,8 @@
   return npos;
 }
 
-string_view::size_type string_view::find_first_of(string_view s,
-                                                  size_type pos) const
-    noexcept {
+string_view::size_type string_view::find_first_of(
+    string_view s, size_type pos) const noexcept {
   if (empty() || s.empty()) {
     return npos;
   }
@@ -138,9 +159,8 @@
   return npos;
 }
 
-string_view::size_type string_view::find_first_not_of(string_view s,
-                                                      size_type pos) const
-    noexcept {
+string_view::size_type string_view::find_first_not_of(
+    string_view s, size_type pos) const noexcept {
   if (empty()) return npos;
   // Avoid the cost of LookupTable() for a single-character search.
   if (s.length_ == 1) return find_first_not_of(s.ptr_[0], pos);
@@ -153,9 +173,8 @@
   return npos;
 }
 
-string_view::size_type string_view::find_first_not_of(char c,
-                                                      size_type pos) const
-    noexcept {
+string_view::size_type string_view::find_first_not_of(
+    char c, size_type pos) const noexcept {
   if (empty()) return npos;
   for (; pos < length_; ++pos) {
     if (ptr_[pos] != c) {
@@ -180,9 +199,8 @@
   return npos;
 }
 
-string_view::size_type string_view::find_last_not_of(string_view s,
-                                                     size_type pos) const
-    noexcept {
+string_view::size_type string_view::find_last_not_of(
+    string_view s, size_type pos) const noexcept {
   if (empty()) return npos;
   size_type i = std::min(pos, length_ - 1);
   if (s.empty()) return i;
@@ -198,9 +216,8 @@
   return npos;
 }
 
-string_view::size_type string_view::find_last_not_of(char c,
-                                                     size_type pos) const
-    noexcept {
+string_view::size_type string_view::find_last_not_of(
+    char c, size_type pos) const noexcept {
   if (empty()) return npos;
   size_type i = std::min(pos, length_ - 1);
   for (;; --i) {
@@ -212,22 +229,11 @@
   return npos;
 }
 
-// MSVC has non-standard behavior that implicitly creates definitions for static
-// const members. These implicit definitions conflict with explicit out-of-class
-// member definitions that are required by the C++ standard, resulting in
-// LNK1169 "multiply defined" errors at link time. __declspec(selectany) asks
-// MSVC to choose only one definition for the symbol it decorates. See details
-// at https://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx
-#ifdef _MSC_VER
-#define ABSL_STRING_VIEW_SELECTANY __declspec(selectany)
-#else
-#define ABSL_STRING_VIEW_SELECTANY
-#endif
 
-ABSL_STRING_VIEW_SELECTANY
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr string_view::size_type string_view::npos;
-ABSL_STRING_VIEW_SELECTANY
 constexpr string_view::size_type string_view::kMaxSize;
+#endif
 
 ABSL_NAMESPACE_END
 }  // namespace absl
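
With `memmatch` now local to this file, `string_view::find()` scans for the needle's first byte with `memchr` and verifies each candidate with `memcmp`. A short sketch of the resulting behavior (the helper name is illustrative):

#include <cassert>
#include "absl/strings/string_view.h"

void FindBehavior() {
  absl::string_view hay = "hello world";
  assert(hay.find("world") == 6);   // memchr locates 'w', memcmp confirms the rest
  assert(hay.find("") == 0);        // an empty needle matches at the start
  assert(hay.find("worlds") == absl::string_view::npos);  // no match
}
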
diff --git a/abseil-cpp/absl/strings/string_view.h b/abseil-cpp/absl/strings/string_view.h
index 5260b5b..eae11b2 100644
--- a/abseil-cpp/absl/strings/string_view.h
+++ b/abseil-cpp/absl/strings/string_view.h
@@ -36,6 +36,7 @@
 #include <limits>
 #include <string>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/throw_delegate.h"
 #include "absl/base/macros.h"
@@ -54,8 +55,9 @@
 
 #else  // ABSL_USES_STD_STRING_VIEW
 
-#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
-    (defined(__GNUC__) && !defined(__clang__))
+#if ABSL_HAVE_BUILTIN(__builtin_memcmp) ||        \
+    (defined(__GNUC__) && !defined(__clang__)) || \
+    (defined(_MSC_VER) && _MSC_VER >= 1928)
 #define ABSL_INTERNAL_STRING_VIEW_MEMCMP __builtin_memcmp
 #else  // ABSL_HAVE_BUILTIN(__builtin_memcmp)
 #define ABSL_INTERNAL_STRING_VIEW_MEMCMP memcmp
@@ -180,18 +182,20 @@
 
   template <typename Allocator>
   string_view(  // NOLINT(runtime/explicit)
-      const std::basic_string<char, std::char_traits<char>, Allocator>&
-          str) noexcept
+      const std::basic_string<char, std::char_traits<char>, Allocator>& str
+          ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
       // This is implemented in terms of `string_view(p, n)` so `str.size()`
       // doesn't need to be reevaluated after `ptr_` is set.
-      : string_view(str.data(), str.size()) {}
+      // The length check is also skipped since it is unnecessary and causes
+      // code bloat.
+      : string_view(str.data(), str.size(), SkipCheckLengthTag{}) {}
 
   // Implicit constructor of a `string_view` from NUL-terminated `str`. When
   // accepting possibly null strings, use `absl::NullSafeStringView(str)`
   // instead (see below).
+  // The length check is skipped since it is unnecessary and causes code bloat.
   constexpr string_view(const char* str)  // NOLINT(runtime/explicit)
-      : ptr_(str),
-        length_(str ? CheckLengthInternal(StrlenInternal(str)) : 0) {}
+      : ptr_(str), length_(str ? StrlenInternal(str) : 0) {}
 
   // Implicit constructor of a `string_view` from a `const char*` and length.
   constexpr string_view(const char* data, size_type len)
@@ -264,9 +268,7 @@
   // string_view::size()
   //
   // Returns the number of characters in the `string_view`.
-  constexpr size_type size() const noexcept {
-    return length_;
-  }
+  constexpr size_type size() const noexcept { return length_; }
 
   // string_view::length()
   //
@@ -333,7 +335,7 @@
   //
   // Removes the first `n` characters from the `string_view`. Note that the
   // underlying string is not changed, only the view.
-  void remove_prefix(size_type n) {
+  constexpr void remove_prefix(size_type n) {
     ABSL_HARDENING_ASSERT(n <= length_);
     ptr_ += n;
     length_ -= n;
@@ -343,7 +345,7 @@
   //
   // Removes the last `n` characters from the `string_view`. Note that the
   // underlying string is not changed, only the view.
-  void remove_suffix(size_type n) {
+  constexpr void remove_suffix(size_type n) {
     ABSL_HARDENING_ASSERT(n <= length_);
     length_ -= n;
   }
@@ -351,7 +353,7 @@
   // string_view::swap()
   //
   // Swaps this `string_view` with another `string_view`.
-  void swap(string_view& s) noexcept {
+  constexpr void swap(string_view& s) noexcept {
     auto t = *this;
     *this = s;
     s = t;
@@ -388,7 +390,7 @@
   // `n`) as another string_view. This function throws `std::out_of_bounds` if
   // `pos > size`.
   // Use absl::ClippedSubstr if you need a truncating substr operation.
-  constexpr string_view substr(size_type pos, size_type n = npos) const {
+  constexpr string_view substr(size_type pos = 0, size_type n = npos) const {
     return ABSL_PREDICT_FALSE(pos > length_)
                ? (base_internal::ThrowStdOutOfRange(
                       "absl::string_view::substr"),
@@ -398,12 +400,10 @@
 
   // string_view::compare()
   //
-  // Performs a lexicographical comparison between the `string_view` and
-  // another `absl::string_view`, returning -1 if `this` is less than, 0 if
-  // `this` is equal to, and 1 if `this` is greater than the passed string
-  // view. Note that in the case of data equality, a further comparison is made
-  // on the respective sizes of the two `string_view`s to determine which is
-  // smaller, equal, or greater.
+  // Performs a lexicographical comparison between this `string_view` and
+  // another `string_view` `x`, returning a negative value if `*this` is less
+  // than `x`, 0 if `*this` is equal to `x`, and a positive value if `*this`
+  // is greater than `x`.
   constexpr int compare(string_view x) const noexcept {
     return CompareImpl(length_, x.length_,
                        Min(length_, x.length_) == 0
@@ -414,31 +414,31 @@
 
   // Overload of `string_view::compare()` for comparing a substring of the
   // `string_view` and another `absl::string_view`.
-  int compare(size_type pos1, size_type count1, string_view v) const {
+  constexpr int compare(size_type pos1, size_type count1, string_view v) const {
     return substr(pos1, count1).compare(v);
   }
 
   // Overload of `string_view::compare()` for comparing a substring of the
   // `string_view` and a substring of another `absl::string_view`.
-  int compare(size_type pos1, size_type count1, string_view v, size_type pos2,
-              size_type count2) const {
+  constexpr int compare(size_type pos1, size_type count1, string_view v,
+                        size_type pos2, size_type count2) const {
     return substr(pos1, count1).compare(v.substr(pos2, count2));
   }
 
   // Overload of `string_view::compare()` for comparing a `string_view` and a
-  // a different  C-style string `s`.
-  int compare(const char* s) const { return compare(string_view(s)); }
+  // different C-style string `s`.
+  constexpr int compare(const char* s) const { return compare(string_view(s)); }
 
   // Overload of `string_view::compare()` for comparing a substring of the
   // `string_view` and a different C-style string `s`.
-  int compare(size_type pos1, size_type count1, const char* s) const {
+  constexpr int compare(size_type pos1, size_type count1, const char* s) const {
     return substr(pos1, count1).compare(string_view(s));
   }
 
   // Overload of `string_view::compare()` for comparing a substring of the
   // `string_view` and a substring of a different C-style string `s`.
-  int compare(size_type pos1, size_type count1, const char* s,
-              size_type count2) const {
+  constexpr int compare(size_type pos1, size_type count1, const char* s,
+                        size_type count2) const {
     return substr(pos1, count1).compare(string_view(s, count2));
   }
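
As the updated comment notes, `compare()` follows the `strcmp` convention of returning any negative, zero, or positive value rather than exactly -1, 0, or 1, and the remaining overloads are now `constexpr` as well. A brief sketch (the helper name is illustrative):

#include <cassert>
#include "absl/strings/string_view.h"

void CompareConvention() {
  absl::string_view a = "abc";
  assert(a.compare("abc") == 0);
  assert(a.compare("abd") < 0);                      // 'c' < 'd'
  assert(a.compare(1, 2, "bc") == 0);                // substring "bc" vs "bc"
  assert(absl::string_view("abcd").compare(a) > 0);  // equal prefix, longer string is greater
}
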
 
@@ -455,48 +455,92 @@
   // within the `string_view`.
   size_type find(char c, size_type pos = 0) const noexcept;
 
+  // Overload of `string_view::find()` for finding a substring of a different
+  // C-style string `s` within the `string_view`.
+  size_type find(const char* s, size_type pos, size_type count) const {
+    return find(string_view(s, count), pos);
+  }
+
+  // Overload of `string_view::find()` for finding a different C-style string
+  // `s` within the `string_view`.
+  size_type find(const char* s, size_type pos = 0) const {
+    return find(string_view(s), pos);
+  }
+
   // string_view::rfind()
   //
   // Finds the last occurrence of a substring `s` within the `string_view`,
   // returning the position of the first character's match, or `npos` if no
   // match was found.
-  size_type rfind(string_view s, size_type pos = npos) const
-      noexcept;
+  size_type rfind(string_view s, size_type pos = npos) const noexcept;
 
   // Overload of `string_view::rfind()` for finding the last given character `c`
   // within the `string_view`.
   size_type rfind(char c, size_type pos = npos) const noexcept;
 
+  // Overload of `string_view::rfind()` for finding a substring of a different
+  // C-style string `s` within the `string_view`.
+  size_type rfind(const char* s, size_type pos, size_type count) const {
+    return rfind(string_view(s, count), pos);
+  }
+
+  // Overload of `string_view::rfind()` for finding a different C-style string
+  // `s` within the `string_view`.
+  size_type rfind(const char* s, size_type pos = npos) const {
+    return rfind(string_view(s), pos);
+  }
+
   // string_view::find_first_of()
   //
   // Finds the first occurrence of any of the characters in `s` within the
   // `string_view`, returning the start position of the match, or `npos` if no
   // match was found.
-  size_type find_first_of(string_view s, size_type pos = 0) const
-      noexcept;
+  size_type find_first_of(string_view s, size_type pos = 0) const noexcept;
 
   // Overload of `string_view::find_first_of()` for finding a character `c`
   // within the `string_view`.
-  size_type find_first_of(char c, size_type pos = 0) const
-      noexcept {
+  size_type find_first_of(char c, size_type pos = 0) const noexcept {
     return find(c, pos);
   }
 
+  // Overload of `string_view::find_first_of()` for finding a substring of a
+  // different C-style string `s` within the `string_view`.
+  size_type find_first_of(const char* s, size_type pos,
+                          size_type count) const {
+    return find_first_of(string_view(s, count), pos);
+  }
+
+  // Overload of `string_view::find_first_of()` for finding a different C-style
+  // string `s` within the `string_view`.
+  size_type find_first_of(const char* s, size_type pos = 0) const {
+    return find_first_of(string_view(s), pos);
+  }
+
   // string_view::find_last_of()
   //
   // Finds the last occurrence of any of the characters in `s` within the
   // `string_view`, returning the start position of the match, or `npos` if no
   // match was found.
-  size_type find_last_of(string_view s, size_type pos = npos) const
-      noexcept;
+  size_type find_last_of(string_view s, size_type pos = npos) const noexcept;
 
   // Overload of `string_view::find_last_of()` for finding a character `c`
   // within the `string_view`.
-  size_type find_last_of(char c, size_type pos = npos) const
-      noexcept {
+  size_type find_last_of(char c, size_type pos = npos) const noexcept {
     return rfind(c, pos);
   }
 
+  // Overload of `string_view::find_last_of()` for finding a substring of a
+  // different C-style string `s` within the `string_view`.
+  size_type find_last_of(const char* s, size_type pos, size_type count) const {
+    return find_last_of(string_view(s, count), pos);
+  }
+
+  // Overload of `string_view::find_last_of()` for finding a different C-style
+  // string `s` within the `string_view`.
+  size_type find_last_of(const char* s, size_type pos = npos) const {
+    return find_last_of(string_view(s), pos);
+  }
+
   // string_view::find_first_not_of()
   //
   // Finds the first occurrence of any of the characters not in `s` within the
@@ -508,20 +552,51 @@
   // that is not `c` within the `string_view`.
   size_type find_first_not_of(char c, size_type pos = 0) const noexcept;
 
+  // Overload of `string_view::find_first_not_of()` for finding a substring of a
+  // different C-style string `s` within the `string_view`.
+  size_type find_first_not_of(const char* s, size_type pos,
+                              size_type count) const {
+    return find_first_not_of(string_view(s, count), pos);
+  }
+
+  // Overload of `string_view::find_first_not_of()` for finding a different
+  // C-style string `s` within the `string_view`.
+  size_type find_first_not_of(const char* s, size_type pos = 0) const {
+    return find_first_not_of(string_view(s), pos);
+  }
+
   // string_view::find_last_not_of()
   //
   // Finds the last occurrence of any of the characters not in `s` within the
   // `string_view`, returning the start position of the last non-match, or
   // `npos` if no non-match was found.
   size_type find_last_not_of(string_view s,
-                                          size_type pos = npos) const noexcept;
+                             size_type pos = npos) const noexcept;
 
   // Overload of `string_view::find_last_not_of()` for finding a character
   // that is not `c` within the `string_view`.
-  size_type find_last_not_of(char c, size_type pos = npos) const
-      noexcept;
+  size_type find_last_not_of(char c, size_type pos = npos) const noexcept;
+
+  // Overload of `string_view::find_last_not_of()` for finding a substring of a
+  // different C-style string `s` within the `string_view`.
+  size_type find_last_not_of(const char* s, size_type pos,
+                             size_type count) const {
+    return find_last_not_of(string_view(s, count), pos);
+  }
+
+  // Overload of `string_view::find_last_not_of()` for finding a different
+  // C-style string `s` within the `string_view`.
+  size_type find_last_not_of(const char* s, size_type pos = npos) const {
+    return find_last_not_of(string_view(s), pos);
+  }
 
  private:
+  // The constructor from std::string delegates to this constructor.
+  // See the comment on that constructor for the rationale.
+  struct SkipCheckLengthTag {};
+  string_view(const char* data, size_type len, SkipCheckLengthTag) noexcept
+      : ptr_(data), length_(len) {}
+
   static constexpr size_type kMaxSize =
       (std::numeric_limits<difference_type>::max)();
 
diff --git a/abseil-cpp/absl/strings/string_view_test.cc b/abseil-cpp/absl/strings/string_view_test.cc
index dcebb15..990c211 100644
--- a/abseil-cpp/absl/strings/string_view_test.cc
+++ b/abseil-cpp/absl/strings/string_view_test.cc
@@ -82,7 +82,7 @@
     // Null.
     absl::string_view s10;
     EXPECT_TRUE(s10.data() == nullptr);
-    EXPECT_EQ(0, s10.length());
+    EXPECT_EQ(0u, s10.length());
   }
 
   {
@@ -90,17 +90,17 @@
     const char* hello = "hello";
     absl::string_view s20(hello);
     EXPECT_TRUE(s20.data() == hello);
-    EXPECT_EQ(5, s20.length());
+    EXPECT_EQ(5u, s20.length());
 
     // const char* with length.
     absl::string_view s21(hello, 4);
     EXPECT_TRUE(s21.data() == hello);
-    EXPECT_EQ(4, s21.length());
+    EXPECT_EQ(4u, s21.length());
 
     // Not recommended, but valid C++
     absl::string_view s22(hello, 6);
     EXPECT_TRUE(s22.data() == hello);
-    EXPECT_EQ(6, s22.length());
+    EXPECT_EQ(6u, s22.length());
   }
 
   {
@@ -108,7 +108,7 @@
     std::string hola = "hola";
     absl::string_view s30(hola);
     EXPECT_TRUE(s30.data() == hola.data());
-    EXPECT_EQ(4, s30.length());
+    EXPECT_EQ(4u, s30.length());
 
     // std::string with embedded '\0'.
     hola.push_back('\0');
@@ -116,7 +116,7 @@
     hola.push_back('\0');
     absl::string_view s31(hola);
     EXPECT_TRUE(s31.data() == hola.data());
-    EXPECT_EQ(8, s31.length());
+    EXPECT_EQ(8u, s31.length());
   }
 
   {
@@ -165,7 +165,7 @@
   map.insert(std::make_pair(p1, 0));
   map.insert(std::make_pair(p2, 1));
   map.insert(std::make_pair(p3, 2));
-  EXPECT_EQ(map.size(), 3);
+  EXPECT_EQ(map.size(), 3u);
 
   TestMap::const_iterator iter = map.begin();
   EXPECT_EQ(iter->second, 1);
@@ -183,7 +183,7 @@
   EXPECT_TRUE(new_iter != map.end());
 
   map.erase(new_iter);
-  EXPECT_EQ(map.size(), 2);
+  EXPECT_EQ(map.size(), 2u);
 
   iter = map.begin();
   EXPECT_EQ(iter->second, 2);
@@ -261,11 +261,11 @@
 
 TEST(StringViewTest, ComparisonOperatorsByCharacterPosition) {
   std::string x;
-  for (int i = 0; i < 256; i++) {
+  for (size_t i = 0; i < 256; i++) {
     x += 'a';
     std::string y = x;
     COMPARE(true, ==, x, y);
-    for (int j = 0; j < i; j++) {
+    for (size_t j = 0; j < i; j++) {
       std::string z = x;
       z[j] = 'b';       // Differs in position 'j'
       COMPARE(false, ==, x, z);
@@ -341,12 +341,12 @@
   EXPECT_EQ(*(c.rend() - 1), 'x');
   EXPECT_TRUE(a.rbegin() + 26 == a.rend());
 
-  EXPECT_EQ(a.size(), 26);
-  EXPECT_EQ(b.size(), 3);
-  EXPECT_EQ(c.size(), 3);
-  EXPECT_EQ(d.size(), 6);
-  EXPECT_EQ(e.size(), 0);
-  EXPECT_EQ(f.size(), 7);
+  EXPECT_EQ(a.size(), 26u);
+  EXPECT_EQ(b.size(), 3u);
+  EXPECT_EQ(c.size(), 3u);
+  EXPECT_EQ(d.size(), 6u);
+  EXPECT_EQ(e.size(), 0u);
+  EXPECT_EQ(f.size(), 7u);
 
   EXPECT_TRUE(!d.empty());
   EXPECT_TRUE(d.begin() != d.end());
@@ -356,17 +356,17 @@
   EXPECT_TRUE(e.begin() == e.end());
 
   char buf[4] = { '%', '%', '%', '%' };
-  EXPECT_EQ(a.copy(buf, 4), 4);
+  EXPECT_EQ(a.copy(buf, 4), 4u);
   EXPECT_EQ(buf[0], a[0]);
   EXPECT_EQ(buf[1], a[1]);
   EXPECT_EQ(buf[2], a[2]);
   EXPECT_EQ(buf[3], a[3]);
-  EXPECT_EQ(a.copy(buf, 3, 7), 3);
+  EXPECT_EQ(a.copy(buf, 3, 7), 3u);
   EXPECT_EQ(buf[0], a[7]);
   EXPECT_EQ(buf[1], a[8]);
   EXPECT_EQ(buf[2], a[9]);
   EXPECT_EQ(buf[3], a[3]);
-  EXPECT_EQ(c.copy(buf, 99), 3);
+  EXPECT_EQ(c.copy(buf, 99), 3u);
   EXPECT_EQ(buf[0], c[0]);
   EXPECT_EQ(buf[1], c[1]);
   EXPECT_EQ(buf[2], c[2]);
@@ -393,22 +393,22 @@
       7);
 
   d = absl::string_view();
-  EXPECT_EQ(d.size(), 0);
+  EXPECT_EQ(d.size(), 0u);
   EXPECT_TRUE(d.empty());
   EXPECT_TRUE(d.data() == nullptr);
   EXPECT_TRUE(d.begin() == d.end());
 
-  EXPECT_EQ(a.find(b), 0);
+  EXPECT_EQ(a.find(b), 0u);
   EXPECT_EQ(a.find(b, 1), absl::string_view::npos);
-  EXPECT_EQ(a.find(c), 23);
-  EXPECT_EQ(a.find(c, 9), 23);
+  EXPECT_EQ(a.find(c), 23u);
+  EXPECT_EQ(a.find(c, 9), 23u);
   EXPECT_EQ(a.find(c, absl::string_view::npos), absl::string_view::npos);
   EXPECT_EQ(b.find(c), absl::string_view::npos);
   EXPECT_EQ(b.find(c, absl::string_view::npos), absl::string_view::npos);
-  EXPECT_EQ(a.find(d), 0);
-  EXPECT_EQ(a.find(e), 0);
-  EXPECT_EQ(a.find(d, 12), 12);
-  EXPECT_EQ(a.find(e, 17), 17);
+  EXPECT_EQ(a.find(d), 0u);
+  EXPECT_EQ(a.find(e), 0u);
+  EXPECT_EQ(a.find(d, 12), 12u);
+  EXPECT_EQ(a.find(e, 17), 17u);
   absl::string_view g("xx not found bb");
   EXPECT_EQ(a.find(g), absl::string_view::npos);
   // empty string nonsense
@@ -427,17 +427,17 @@
   EXPECT_EQ(e.find(d, 4), std::string().find(std::string(), 4));
   EXPECT_EQ(e.find(e, 4), std::string().find(std::string(), 4));
 
-  EXPECT_EQ(a.find('a'), 0);
-  EXPECT_EQ(a.find('c'), 2);
-  EXPECT_EQ(a.find('z'), 25);
+  EXPECT_EQ(a.find('a'), 0u);
+  EXPECT_EQ(a.find('c'), 2u);
+  EXPECT_EQ(a.find('z'), 25u);
   EXPECT_EQ(a.find('$'), absl::string_view::npos);
   EXPECT_EQ(a.find('\0'), absl::string_view::npos);
-  EXPECT_EQ(f.find('\0'), 3);
-  EXPECT_EQ(f.find('3'), 2);
-  EXPECT_EQ(f.find('5'), 5);
-  EXPECT_EQ(g.find('o'), 4);
-  EXPECT_EQ(g.find('o', 4), 4);
-  EXPECT_EQ(g.find('o', 5), 8);
+  EXPECT_EQ(f.find('\0'), 3u);
+  EXPECT_EQ(f.find('3'), 2u);
+  EXPECT_EQ(f.find('5'), 5u);
+  EXPECT_EQ(g.find('o'), 4u);
+  EXPECT_EQ(g.find('o', 4), 4u);
+  EXPECT_EQ(g.find('o', 5), 8u);
   EXPECT_EQ(a.find('b', 5), absl::string_view::npos);
   // empty string nonsense
   EXPECT_EQ(d.find('\0'), absl::string_view::npos);
@@ -449,9 +449,27 @@
   EXPECT_EQ(d.find('x', 4), absl::string_view::npos);
   EXPECT_EQ(e.find('x', 7), absl::string_view::npos);
 
-  EXPECT_EQ(a.rfind(b), 0);
-  EXPECT_EQ(a.rfind(b, 1), 0);
-  EXPECT_EQ(a.rfind(c), 23);
+  EXPECT_EQ(a.find(b.data(), 1, 0), 1u);
+  EXPECT_EQ(a.find(c.data(), 9, 0), 9u);
+  EXPECT_EQ(a.find(c.data(), absl::string_view::npos, 0),
+            absl::string_view::npos);
+  EXPECT_EQ(b.find(c.data(), absl::string_view::npos, 0),
+            absl::string_view::npos);
+  // empty string nonsense
+  EXPECT_EQ(d.find(b.data(), 4, 0), absl::string_view::npos);
+  EXPECT_EQ(e.find(b.data(), 7, 0), absl::string_view::npos);
+
+  EXPECT_EQ(a.find(b.data(), 1), absl::string_view::npos);
+  EXPECT_EQ(a.find(c.data(), 9), 23u);
+  EXPECT_EQ(a.find(c.data(), absl::string_view::npos), absl::string_view::npos);
+  EXPECT_EQ(b.find(c.data(), absl::string_view::npos), absl::string_view::npos);
+  // empty string nonsense
+  EXPECT_EQ(d.find(b.data(), 4), absl::string_view::npos);
+  EXPECT_EQ(e.find(b.data(), 7), absl::string_view::npos);
+
+  EXPECT_EQ(a.rfind(b), 0u);
+  EXPECT_EQ(a.rfind(b, 1), 0u);
+  EXPECT_EQ(a.rfind(c), 23u);
   EXPECT_EQ(a.rfind(c, 22), absl::string_view::npos);
   EXPECT_EQ(a.rfind(c, 1), absl::string_view::npos);
   EXPECT_EQ(a.rfind(c, 0), absl::string_view::npos);
@@ -459,8 +477,8 @@
   EXPECT_EQ(b.rfind(c, 0), absl::string_view::npos);
   EXPECT_EQ(a.rfind(d), std::string(a).rfind(std::string()));
   EXPECT_EQ(a.rfind(e), std::string(a).rfind(std::string()));
-  EXPECT_EQ(a.rfind(d, 12), 12);
-  EXPECT_EQ(a.rfind(e, 17), 17);
+  EXPECT_EQ(a.rfind(d, 12), 12u);
+  EXPECT_EQ(a.rfind(e, 17), 17u);
   EXPECT_EQ(a.rfind(g), absl::string_view::npos);
   EXPECT_EQ(d.rfind(b), absl::string_view::npos);
   EXPECT_EQ(e.rfind(b), absl::string_view::npos);
@@ -476,20 +494,28 @@
   EXPECT_EQ(d.rfind(e), std::string().rfind(std::string()));
   EXPECT_EQ(e.rfind(e), std::string().rfind(std::string()));
 
-  EXPECT_EQ(g.rfind('o'), 8);
+  EXPECT_EQ(g.rfind('o'), 8u);
   EXPECT_EQ(g.rfind('q'), absl::string_view::npos);
-  EXPECT_EQ(g.rfind('o', 8), 8);
-  EXPECT_EQ(g.rfind('o', 7), 4);
+  EXPECT_EQ(g.rfind('o', 8), 8u);
+  EXPECT_EQ(g.rfind('o', 7), 4u);
   EXPECT_EQ(g.rfind('o', 3), absl::string_view::npos);
-  EXPECT_EQ(f.rfind('\0'), 3);
-  EXPECT_EQ(f.rfind('\0', 12), 3);
-  EXPECT_EQ(f.rfind('3'), 2);
-  EXPECT_EQ(f.rfind('5'), 5);
+  EXPECT_EQ(f.rfind('\0'), 3u);
+  EXPECT_EQ(f.rfind('\0', 12), 3u);
+  EXPECT_EQ(f.rfind('3'), 2u);
+  EXPECT_EQ(f.rfind('5'), 5u);
   // empty string nonsense
   EXPECT_EQ(d.rfind('o'), absl::string_view::npos);
   EXPECT_EQ(e.rfind('o'), absl::string_view::npos);
   EXPECT_EQ(d.rfind('o', 4), absl::string_view::npos);
   EXPECT_EQ(e.rfind('o', 7), absl::string_view::npos);
+
+  EXPECT_EQ(a.rfind(b.data(), 1, 0), 1u);
+  EXPECT_EQ(a.rfind(c.data(), 22, 0), 22u);
+  EXPECT_EQ(a.rfind(c.data(), 1, 0), 1u);
+  EXPECT_EQ(a.rfind(c.data(), 0, 0), 0u);
+  EXPECT_EQ(b.rfind(c.data(), 0, 0), 0u);
+  EXPECT_EQ(d.rfind(b.data(), 4, 0), 0u);
+  EXPECT_EQ(e.rfind(b.data(), 7, 0), 0u);
 }
 
 // Continued from STL2
@@ -507,18 +533,18 @@
   absl::string_view g("xx not found bb");
 
   d = absl::string_view();
-  EXPECT_EQ(a.find_first_of(b), 0);
-  EXPECT_EQ(a.find_first_of(b, 0), 0);
-  EXPECT_EQ(a.find_first_of(b, 1), 1);
-  EXPECT_EQ(a.find_first_of(b, 2), 2);
+  EXPECT_EQ(a.find_first_of(b), 0u);
+  EXPECT_EQ(a.find_first_of(b, 0), 0u);
+  EXPECT_EQ(a.find_first_of(b, 1), 1u);
+  EXPECT_EQ(a.find_first_of(b, 2), 2u);
   EXPECT_EQ(a.find_first_of(b, 3), absl::string_view::npos);
-  EXPECT_EQ(a.find_first_of(c), 23);
-  EXPECT_EQ(a.find_first_of(c, 23), 23);
-  EXPECT_EQ(a.find_first_of(c, 24), 24);
-  EXPECT_EQ(a.find_first_of(c, 25), 25);
+  EXPECT_EQ(a.find_first_of(c), 23u);
+  EXPECT_EQ(a.find_first_of(c, 23), 23u);
+  EXPECT_EQ(a.find_first_of(c, 24), 24u);
+  EXPECT_EQ(a.find_first_of(c, 25), 25u);
   EXPECT_EQ(a.find_first_of(c, 26), absl::string_view::npos);
-  EXPECT_EQ(g.find_first_of(b), 13);
-  EXPECT_EQ(g.find_first_of(c), 0);
+  EXPECT_EQ(g.find_first_of(b), 13u);
+  EXPECT_EQ(g.find_first_of(c), 0u);
   EXPECT_EQ(a.find_first_of(f), absl::string_view::npos);
   EXPECT_EQ(f.find_first_of(a), absl::string_view::npos);
   // empty string nonsense
@@ -531,19 +557,19 @@
   EXPECT_EQ(d.find_first_of(e), absl::string_view::npos);
   EXPECT_EQ(e.find_first_of(e), absl::string_view::npos);
 
-  EXPECT_EQ(a.find_first_not_of(b), 3);
-  EXPECT_EQ(a.find_first_not_of(c), 0);
+  EXPECT_EQ(a.find_first_not_of(b), 3u);
+  EXPECT_EQ(a.find_first_not_of(c), 0u);
   EXPECT_EQ(b.find_first_not_of(a), absl::string_view::npos);
   EXPECT_EQ(c.find_first_not_of(a), absl::string_view::npos);
-  EXPECT_EQ(f.find_first_not_of(a), 0);
-  EXPECT_EQ(a.find_first_not_of(f), 0);
-  EXPECT_EQ(a.find_first_not_of(d), 0);
-  EXPECT_EQ(a.find_first_not_of(e), 0);
+  EXPECT_EQ(f.find_first_not_of(a), 0u);
+  EXPECT_EQ(a.find_first_not_of(f), 0u);
+  EXPECT_EQ(a.find_first_not_of(d), 0u);
+  EXPECT_EQ(a.find_first_not_of(e), 0u);
   // empty string nonsense
-  EXPECT_EQ(a.find_first_not_of(d), 0);
-  EXPECT_EQ(a.find_first_not_of(e), 0);
-  EXPECT_EQ(a.find_first_not_of(d, 1), 1);
-  EXPECT_EQ(a.find_first_not_of(e, 1), 1);
+  EXPECT_EQ(a.find_first_not_of(d), 0u);
+  EXPECT_EQ(a.find_first_not_of(e), 0u);
+  EXPECT_EQ(a.find_first_not_of(d, 1), 1u);
+  EXPECT_EQ(a.find_first_not_of(e, 1), 1u);
   EXPECT_EQ(a.find_first_not_of(d, a.size() - 1), a.size() - 1);
   EXPECT_EQ(a.find_first_not_of(e, a.size() - 1), a.size() - 1);
   EXPECT_EQ(a.find_first_not_of(d, a.size()), absl::string_view::npos);
@@ -562,11 +588,11 @@
   absl::string_view h("====");
   EXPECT_EQ(h.find_first_not_of('='), absl::string_view::npos);
   EXPECT_EQ(h.find_first_not_of('=', 3), absl::string_view::npos);
-  EXPECT_EQ(h.find_first_not_of('\0'), 0);
-  EXPECT_EQ(g.find_first_not_of('x'), 2);
-  EXPECT_EQ(f.find_first_not_of('\0'), 0);
-  EXPECT_EQ(f.find_first_not_of('\0', 3), 4);
-  EXPECT_EQ(f.find_first_not_of('\0', 2), 2);
+  EXPECT_EQ(h.find_first_not_of('\0'), 0u);
+  EXPECT_EQ(g.find_first_not_of('x'), 2u);
+  EXPECT_EQ(f.find_first_not_of('\0'), 0u);
+  EXPECT_EQ(f.find_first_not_of('\0', 3), 4u);
+  EXPECT_EQ(f.find_first_not_of('\0', 2), 2u);
   // empty string nonsense
   EXPECT_EQ(d.find_first_not_of('x'), absl::string_view::npos);
   EXPECT_EQ(e.find_first_not_of('x'), absl::string_view::npos);
@@ -592,20 +618,20 @@
 
   d = absl::string_view();
   EXPECT_EQ(h.find_last_of(a), absl::string_view::npos);
-  EXPECT_EQ(g.find_last_of(a), g.size()-1);
-  EXPECT_EQ(a.find_last_of(b), 2);
-  EXPECT_EQ(a.find_last_of(c), a.size()-1);
-  EXPECT_EQ(f.find_last_of(i), 6);
-  EXPECT_EQ(a.find_last_of('a'), 0);
-  EXPECT_EQ(a.find_last_of('b'), 1);
-  EXPECT_EQ(a.find_last_of('z'), 25);
-  EXPECT_EQ(a.find_last_of('a', 5), 0);
-  EXPECT_EQ(a.find_last_of('b', 5), 1);
+  EXPECT_EQ(g.find_last_of(a), g.size() - 1);
+  EXPECT_EQ(a.find_last_of(b), 2u);
+  EXPECT_EQ(a.find_last_of(c), a.size() - 1);
+  EXPECT_EQ(f.find_last_of(i), 6u);
+  EXPECT_EQ(a.find_last_of('a'), 0u);
+  EXPECT_EQ(a.find_last_of('b'), 1u);
+  EXPECT_EQ(a.find_last_of('z'), 25u);
+  EXPECT_EQ(a.find_last_of('a', 5), 0u);
+  EXPECT_EQ(a.find_last_of('b', 5), 1u);
   EXPECT_EQ(a.find_last_of('b', 0), absl::string_view::npos);
-  EXPECT_EQ(a.find_last_of('z', 25), 25);
+  EXPECT_EQ(a.find_last_of('z', 25), 25u);
   EXPECT_EQ(a.find_last_of('z', 24), absl::string_view::npos);
-  EXPECT_EQ(f.find_last_of(i, 5), 5);
-  EXPECT_EQ(f.find_last_of(i, 6), 6);
+  EXPECT_EQ(f.find_last_of(i, 5), 5u);
+  EXPECT_EQ(f.find_last_of(i, 6), 6u);
   EXPECT_EQ(f.find_last_of(a, 4), absl::string_view::npos);
   // empty string nonsense
   EXPECT_EQ(f.find_last_of(d), absl::string_view::npos);
@@ -625,19 +651,19 @@
   EXPECT_EQ(d.find_last_of(f, 4), absl::string_view::npos);
   EXPECT_EQ(e.find_last_of(f, 4), absl::string_view::npos);
 
-  EXPECT_EQ(a.find_last_not_of(b), a.size()-1);
-  EXPECT_EQ(a.find_last_not_of(c), 22);
+  EXPECT_EQ(a.find_last_not_of(b), a.size() - 1);
+  EXPECT_EQ(a.find_last_not_of(c), 22u);
   EXPECT_EQ(b.find_last_not_of(a), absl::string_view::npos);
   EXPECT_EQ(b.find_last_not_of(b), absl::string_view::npos);
-  EXPECT_EQ(f.find_last_not_of(i), 4);
-  EXPECT_EQ(a.find_last_not_of(c, 24), 22);
-  EXPECT_EQ(a.find_last_not_of(b, 3), 3);
+  EXPECT_EQ(f.find_last_not_of(i), 4u);
+  EXPECT_EQ(a.find_last_not_of(c, 24), 22u);
+  EXPECT_EQ(a.find_last_not_of(b, 3), 3u);
   EXPECT_EQ(a.find_last_not_of(b, 2), absl::string_view::npos);
   // empty string nonsense
-  EXPECT_EQ(f.find_last_not_of(d), f.size()-1);
-  EXPECT_EQ(f.find_last_not_of(e), f.size()-1);
-  EXPECT_EQ(f.find_last_not_of(d, 4), 4);
-  EXPECT_EQ(f.find_last_not_of(e, 4), 4);
+  EXPECT_EQ(f.find_last_not_of(d), f.size() - 1);
+  EXPECT_EQ(f.find_last_not_of(e), f.size() - 1);
+  EXPECT_EQ(f.find_last_not_of(d, 4), 4u);
+  EXPECT_EQ(f.find_last_not_of(e, 4), 4u);
   EXPECT_EQ(d.find_last_not_of(d), absl::string_view::npos);
   EXPECT_EQ(d.find_last_not_of(e), absl::string_view::npos);
   EXPECT_EQ(e.find_last_not_of(d), absl::string_view::npos);
@@ -653,10 +679,10 @@
 
   EXPECT_EQ(h.find_last_not_of('x'), h.size() - 1);
   EXPECT_EQ(h.find_last_not_of('='), absl::string_view::npos);
-  EXPECT_EQ(b.find_last_not_of('c'), 1);
-  EXPECT_EQ(h.find_last_not_of('x', 2), 2);
+  EXPECT_EQ(b.find_last_not_of('c'), 1u);
+  EXPECT_EQ(h.find_last_not_of('x', 2), 2u);
   EXPECT_EQ(h.find_last_not_of('=', 2), absl::string_view::npos);
-  EXPECT_EQ(b.find_last_not_of('b', 1), 0);
+  EXPECT_EQ(b.find_last_not_of('b', 1), 0u);
   // empty string nonsense
   EXPECT_EQ(d.find_last_not_of('x'), absl::string_view::npos);
   EXPECT_EQ(e.find_last_not_of('x'), absl::string_view::npos);
@@ -678,6 +704,7 @@
   EXPECT_EQ(a.substr(23, 3), c);
   EXPECT_EQ(a.substr(23, 99), c);
   EXPECT_EQ(a.substr(0), a);
+  EXPECT_EQ(a.substr(), a);
   EXPECT_EQ(a.substr(3, 2), "de");
   // empty string nonsense
   EXPECT_EQ(d.substr(0, 99), e);
@@ -707,7 +734,7 @@
 TEST(StringViewTest, UTF8) {
   std::string utf8 = "\u00E1";
   std::string utf8_twice = utf8 + " " + utf8;
-  int utf8_len = strlen(utf8.data());
+  size_t utf8_len = strlen(utf8.data());
   EXPECT_EQ(utf8_len, absl::string_view(utf8_twice).find_first_of(" "));
   EXPECT_EQ(utf8_len, absl::string_view(utf8_twice).find_first_of(" \t"));
 }
@@ -852,12 +879,12 @@
 TEST(StringViewTest, NULLInput) {
   absl::string_view s;
   EXPECT_EQ(s.data(), nullptr);
-  EXPECT_EQ(s.size(), 0);
+  EXPECT_EQ(s.size(), 0u);
 
 #ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR
   s = absl::string_view(nullptr);
   EXPECT_EQ(s.data(), nullptr);
-  EXPECT_EQ(s.size(), 0);
+  EXPECT_EQ(s.size(), 0u);
 
   // .ToString() on a absl::string_view with nullptr should produce the empty
   // string.
@@ -915,9 +942,9 @@
   EXPECT_EQ(abc.at(1), 'b');
   EXPECT_EQ(abc.at(2), 'c');
 #ifdef ABSL_HAVE_EXCEPTIONS
-  EXPECT_THROW(abc.at(3), std::out_of_range);
+  EXPECT_THROW((void)abc.at(3), std::out_of_range);
 #else
-  ABSL_EXPECT_DEATH_IF_SUPPORTED(abc.at(3), "absl::string_view::at");
+  ABSL_EXPECT_DEATH_IF_SUPPORTED((void)abc.at(3), "absl::string_view::at");
 #endif
 }
 
@@ -932,7 +959,7 @@
   {
     absl::string_view s = absl::NullSafeStringView(nullptr);
     EXPECT_EQ(nullptr, s.data());
-    EXPECT_EQ(0, s.size());
+    EXPECT_EQ(0u, s.size());
     EXPECT_EQ(absl::string_view(), s);
   }
   {
@@ -948,7 +975,7 @@
   {
     constexpr absl::string_view s = absl::NullSafeStringView(nullptr);
     EXPECT_EQ(nullptr, s.data());
-    EXPECT_EQ(0, s.size());
+    EXPECT_EQ(0u, s.size());
     EXPECT_EQ(absl::string_view(), s);
   }
 #if !defined(_MSC_VER) || _MSC_VER >= 1910
@@ -963,7 +990,7 @@
   }
   {
     constexpr absl::string_view s = absl::NullSafeStringView("hello");
-    EXPECT_EQ(s.size(), 5);
+    EXPECT_EQ(s.size(), 5u);
     EXPECT_EQ("hello", s);
   }
 #endif
@@ -1009,7 +1036,7 @@
 
 #ifdef ABSL_HAVE_CONSTEXPR_STRING_VIEW_FROM_CSTR
   constexpr absl::string_view cstr_strlen("foo");
-  EXPECT_EQ(cstr_strlen.length(), 3);
+  EXPECT_EQ(cstr_strlen.length(), 3u);
   constexpr absl::string_view cstr_strlen2 = "bar";
   EXPECT_EQ(cstr_strlen2, "bar");
 
@@ -1084,10 +1111,27 @@
   EXPECT_NE(cstr_ptr, nullptr);
 
   constexpr size_t sp_npos = sp.npos;
-  EXPECT_EQ(sp_npos, -1);
+  EXPECT_EQ(sp_npos, static_cast<size_t>(-1));
 }
 
-TEST(StringViewTest, ConstexprSubstr) {
+constexpr char ConstexprMethodsHelper() {
+#if defined(__cplusplus) && __cplusplus >= 201402L
+  absl::string_view str("123", 3);
+  str.remove_prefix(1);
+  str.remove_suffix(1);
+  absl::string_view bar;
+  str.swap(bar);
+  return bar.front();
+#else
+  return '2';
+#endif
+}
+
+TEST(StringViewTest, ConstexprMethods) {
+  // remove_prefix, remove_suffix, swap
+  static_assert(ConstexprMethodsHelper() == '2', "");
+
+  // substr
   constexpr absl::string_view foobar("foobar", 6);
   constexpr absl::string_view foo = foobar.substr(0, 3);
   constexpr absl::string_view bar = foobar.substr(3);
@@ -1135,7 +1179,7 @@
   // Abseil's string_view implementation has bounds-checking in debug mode.
   absl::string_view h = "hello";
   ABSL_EXPECT_DEATH_IF_SUPPORTED(h[5], "");
-  ABSL_EXPECT_DEATH_IF_SUPPORTED(h[-1], "");
+  ABSL_EXPECT_DEATH_IF_SUPPORTED(h[static_cast<size_t>(-1)], "");
 #endif
 #endif
 }
@@ -1145,7 +1189,7 @@
   EXPECT_LT("hello", std::string("world"));
 }
 
-TEST(ComparisonOpsTest, HeterogenousStringViewEquals) {
+TEST(ComparisonOpsTest, HeterogeneousStringViewEquals) {
   EXPECT_EQ(absl::string_view("hello"), std::string("hello"));
   EXPECT_EQ("hello", absl::string_view("hello"));
 }
@@ -1157,17 +1201,17 @@
   a.remove_prefix(1);
   a.remove_suffix(1);
 
-  EXPECT_EQ(0, a.find('x'));
-  EXPECT_EQ(0, a.find('x', 0));
-  EXPECT_EQ(4, a.find('x', 1));
-  EXPECT_EQ(4, a.find('x', 4));
+  EXPECT_EQ(0u, a.find('x'));
+  EXPECT_EQ(0u, a.find('x', 0));
+  EXPECT_EQ(4u, a.find('x', 1));
+  EXPECT_EQ(4u, a.find('x', 4));
   EXPECT_EQ(absl::string_view::npos, a.find('x', 5));
 
-  EXPECT_EQ(4, a.rfind('x'));
-  EXPECT_EQ(4, a.rfind('x', 5));
-  EXPECT_EQ(4, a.rfind('x', 4));
-  EXPECT_EQ(0, a.rfind('x', 3));
-  EXPECT_EQ(0, a.rfind('x', 0));
+  EXPECT_EQ(4u, a.rfind('x'));
+  EXPECT_EQ(4u, a.rfind('x', 5));
+  EXPECT_EQ(4u, a.rfind('x', 4));
+  EXPECT_EQ(0u, a.rfind('x', 3));
+  EXPECT_EQ(0u, a.rfind('x', 0));
 
   // Set a = "yyy".
   a.remove_prefix(1);
@@ -1195,8 +1239,8 @@
 
 #if !defined(NDEBUG) && !defined(ABSL_USES_STD_STRING_VIEW)
 TEST(NonNegativeLenTest, NonNegativeLen) {
-  ABSL_EXPECT_DEATH_IF_SUPPORTED(absl::string_view("xyz", -1),
-                                 "len <= kMaxSize");
+  ABSL_EXPECT_DEATH_IF_SUPPORTED(
+      absl::string_view("xyz", static_cast<size_t>(-1)), "len <= kMaxSize");
 }
 
 TEST(LenExceedsMaxSizeTest, LenExceedsMaxSize) {
diff --git a/abseil-cpp/absl/strings/strip.h b/abseil-cpp/absl/strings/strip.h
index 111872c..341e66f 100644
--- a/abseil-cpp/absl/strings/strip.h
+++ b/abseil-cpp/absl/strings/strip.h
@@ -34,8 +34,9 @@
 
 // ConsumePrefix()
 //
-// Strips the `expected` prefix from the start of the given string, returning
-// `true` if the strip operation succeeded or false otherwise.
+// Strips the `expected` prefix, if found, from the start of `str`.
+// If the operation succeeded, `true` is returned.  If not, `false`
+// is returned and `str` is not modified.
 //
 // Example:
 //
@@ -49,8 +50,9 @@
 }
 // ConsumeSuffix()
 //
-// Strips the `expected` suffix from the end of the given string, returning
-// `true` if the strip operation succeeded or false otherwise.
+// Strips the `expected` suffix, if found, from the end of `str`.
+// If the operation succeeded, `true` is returned.  If not, `false`
+// is returned and `str` is not modified.
 //
 // Example:
 //
@@ -65,7 +67,7 @@
 
 // StripPrefix()
 //
-// Returns a view into the input string 'str' with the given 'prefix' removed,
+// Returns a view into the input string `str` with the given `prefix` removed,
 // but leaving the original string intact. If the prefix does not match at the
 // start of the string, returns the original string instead.
 ABSL_MUST_USE_RESULT inline absl::string_view StripPrefix(
@@ -76,7 +78,7 @@
 
 // StripSuffix()
 //
-// Returns a view into the input string 'str' with the given 'suffix' removed,
+// Returns a view into the input string `str` with the given `suffix` removed,
 // but leaving the original string intact. If the suffix does not match at the
 // end of the string, returns the original string instead.
 ABSL_MUST_USE_RESULT inline absl::string_view StripSuffix(
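
The reworded comments make the failure case explicit: `ConsumePrefix()` and `ConsumeSuffix()` return `false` and leave `str` untouched when the expected affix is not found, while `StripPrefix()`/`StripSuffix()` return the original view unchanged. A small usage sketch (the helper name is illustrative):

#include <cassert>
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"

void StripSketch() {
  absl::string_view v = "id=42";
  if (absl::ConsumePrefix(&v, "id=")) {
    // v is now "42".
  }
  if (!absl::ConsumePrefix(&v, "name=")) {
    // No match: false is returned and v is still "42".
  }
  assert(absl::StripSuffix("file.cc", ".cc") == "file");
  assert(absl::StripPrefix("file.cc", "lib") == "file.cc");  // no match: view returned as-is
}
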
diff --git a/abseil-cpp/absl/strings/substitute.cc b/abseil-cpp/absl/strings/substitute.cc
index 1f3c740..33a3930 100644
--- a/abseil-cpp/absl/strings/substitute.cc
+++ b/abseil-cpp/absl/strings/substitute.cc
@@ -40,7 +40,8 @@
                      absl::CEscape(format).c_str());
 #endif
         return;
-      } else if (absl::ascii_isdigit(format[i + 1])) {
+      } else if (absl::ascii_isdigit(
+                     static_cast<unsigned char>(format[i + 1]))) {
         int index = format[i + 1] - '0';
         if (static_cast<size_t>(index) >= num_args) {
 #ifndef NDEBUG
@@ -75,11 +76,12 @@
 
   // Build the string.
   size_t original_size = output->size();
-  strings_internal::STLStringResizeUninitialized(output, original_size + size);
+  strings_internal::STLStringResizeUninitializedAmortized(output,
+                                                          original_size + size);
   char* target = &(*output)[original_size];
   for (size_t i = 0; i < format.size(); i++) {
     if (format[i] == '$') {
-      if (absl::ascii_isdigit(format[i + 1])) {
+      if (absl::ascii_isdigit(static_cast<unsigned char>(format[i + 1]))) {
         const absl::string_view src = args_array[format[i + 1] - '0'];
         target = std::copy(src.begin(), src.end(), target);
         ++i;  // Skip next char.
@@ -109,7 +111,8 @@
     } while (num != 0);
     *--ptr = 'x';
     *--ptr = '0';
-    piece_ = absl::string_view(ptr, scratch_ + sizeof(scratch_) - ptr);
+    piece_ = absl::string_view(
+        ptr, static_cast<size_t>(scratch_ + sizeof(scratch_) - ptr));
   }
 }
 
@@ -131,7 +134,7 @@
     beg = writer;
   }
 
-  piece_ = absl::string_view(beg, end - beg);
+  piece_ = absl::string_view(beg, static_cast<size_t>(end - beg));
 }
 
 // TODO(jorg): Don't duplicate so much code between here and str_cat.cc
@@ -146,7 +149,7 @@
     *--writer = '0' + (value % 10);
     value /= 10;
   }
-  *--writer = '0' + value;
+  *--writer = '0' + static_cast<char>(value);
   if (neg) *--writer = '-';
 
   ptrdiff_t fillers = writer - minfill;
@@ -163,7 +166,7 @@
     if (add_sign_again) *--writer = '-';
   }
 
-  piece_ = absl::string_view(writer, end - writer);
+  piece_ = absl::string_view(writer, static_cast<size_t>(end - writer));
 }
 
 }  // namespace substitute_internal
diff --git a/abseil-cpp/absl/strings/substitute.h b/abseil-cpp/absl/strings/substitute.h
index c6da4dc..d6a5a69 100644
--- a/abseil-cpp/absl/strings/substitute.h
+++ b/abseil-cpp/absl/strings/substitute.h
@@ -55,6 +55,8 @@
 //   * bool (Printed as "true" or "false")
 //   * pointer types other than char* (Printed as "0x<lower case hex string>",
 //     except that null is printed as "NULL")
+//   * user-defined types via the `AbslStringify()` customization point. See the
+//     documentation for `absl::StrCat` for an explanation on how to use this.
 //
 // If an invalid format string is provided, Substitute returns an empty string
 // and SubstituteAndAppend does not change the provided output string.
@@ -79,6 +81,7 @@
 #include "absl/base/port.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/escaping.h"
+#include "absl/strings/internal/stringify_sink.h"
 #include "absl/strings/numbers.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
@@ -102,14 +105,14 @@
   // Overloads for string-y things
   //
   // Explicitly overload `const char*` so the compiler doesn't cast to `bool`.
-  Arg(const char* value)  // NOLINT(runtime/explicit)
+  Arg(const char* value)  // NOLINT(google-explicit-constructor)
       : piece_(absl::NullSafeStringView(value)) {}
   template <typename Allocator>
   Arg(  // NOLINT
       const std::basic_string<char, std::char_traits<char>, Allocator>&
           value) noexcept
       : piece_(value) {}
-  Arg(absl::string_view value)  // NOLINT(runtime/explicit)
+  Arg(absl::string_view value)  // NOLINT(google-explicit-constructor)
       : piece_(value) {}
 
   // Overloads for primitives
@@ -119,48 +122,70 @@
   // probably using them as 8-bit integers and would probably prefer an integer
   // representation. However, we can't really know, so we make the caller decide
   // what to do.
-  Arg(char value)  // NOLINT(runtime/explicit)
+  Arg(char value)  // NOLINT(google-explicit-constructor)
       : piece_(scratch_, 1) {
     scratch_[0] = value;
   }
   Arg(short value)  // NOLINT(*)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
   Arg(unsigned short value)  // NOLINT(*)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
-  Arg(int value)  // NOLINT(runtime/explicit)
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
+  Arg(int value)  // NOLINT(google-explicit-constructor)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
-  Arg(unsigned int value)  // NOLINT(runtime/explicit)
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
+  Arg(unsigned int value)  // NOLINT(google-explicit-constructor)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
   Arg(long value)  // NOLINT(*)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
   Arg(unsigned long value)  // NOLINT(*)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
   Arg(long long value)  // NOLINT(*)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
   Arg(unsigned long long value)  // NOLINT(*)
       : piece_(scratch_,
-               numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
-  Arg(float value)  // NOLINT(runtime/explicit)
+               static_cast<size_t>(
+                   numbers_internal::FastIntToBuffer(value, scratch_) -
+                   scratch_)) {}
+  Arg(float value)  // NOLINT(google-explicit-constructor)
       : piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) {
   }
-  Arg(double value)  // NOLINT(runtime/explicit)
+  Arg(double value)  // NOLINT(google-explicit-constructor)
       : piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) {
   }
-  Arg(bool value)  // NOLINT(runtime/explicit)
+  Arg(bool value)  // NOLINT(google-explicit-constructor)
       : piece_(value ? "true" : "false") {}
 
-  Arg(Hex hex);  // NOLINT(runtime/explicit)
-  Arg(Dec dec);  // NOLINT(runtime/explicit)
+  template <typename T, typename = typename std::enable_if<
+                            strings_internal::HasAbslStringify<T>::value>::type>
+  Arg(  // NOLINT(google-explicit-constructor)
+      const T& v, strings_internal::StringifySink&& sink = {})
+      : piece_(strings_internal::ExtractStringification(sink, v)) {}
 
-  // vector<bool>::reference and const_reference require special help to
-  // convert to `AlphaNum` because it requires two user defined conversions.
+  Arg(Hex hex);  // NOLINT(google-explicit-constructor)
+  Arg(Dec dec);  // NOLINT(google-explicit-constructor)
+
+  // vector<bool>::reference and const_reference require special help to convert
+  // to `Arg` because it requires two user defined conversions.
   template <typename T,
             absl::enable_if_t<
                 std::is_class<T>::value &&
@@ -172,7 +197,16 @@
 
   // `void*` values, with the exception of `char*`, are printed as
   // "0x<hex value>". However, in the case of `nullptr`, "NULL" is printed.
-  Arg(const void* value);  // NOLINT(runtime/explicit)
+  Arg(const void* value);  // NOLINT(google-explicit-constructor)
+
+  // Normal enums are already handled by the integer formatters.
+  // This overload matches only scoped enums.
+  template <typename T,
+            typename = typename std::enable_if<
+                std::is_enum<T>{} && !std::is_convertible<T, int>{} &&
+                !strings_internal::HasAbslStringify<T>::value>::type>
+  Arg(T value)  // NOLINT(google-explicit-constructor)
+      : Arg(static_cast<typename std::underlying_type<T>::type>(value)) {}
 
   Arg(const Arg&) = delete;
   Arg& operator=(const Arg&) = delete;
@@ -361,43 +395,49 @@
 // This body of functions catches cases where the number of placeholders
 // doesn't match the number of data arguments.
 void SubstituteAndAppend(std::string* output, const char* format)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 0,
-                     "There were no substitution arguments "
-                     "but this format string has a $[0-9] in it");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 0,
+        "There were no substitution arguments "
+        "but this format string either has a $[0-9] in it or contains "
+        "an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(std::string* output, const char* format,
                          const substitute_internal::Arg& a0)
     ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1,
                      "There was 1 substitution argument given, but "
-                     "this format string is either missing its $0, or "
-                     "contains one of $1-$9");
+                     "this format string is missing its $0, contains "
+                     "one of $1-$9, or contains an unescaped $ character (use "
+                     "$$ instead)");
 
 void SubstituteAndAppend(std::string* output, const char* format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 3,
-                     "There were 2 substitution arguments given, but "
-                     "this format string is either missing its $0/$1, or "
-                     "contains one of $2-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 3,
+        "There were 2 substitution arguments given, but this format string is "
+        "missing its $0/$1, contains one of $2-$9, or contains an "
+        "unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(std::string* output, const char* format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1,
                          const substitute_internal::Arg& a2)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 7,
-                     "There were 3 substitution arguments given, but "
-                     "this format string is either missing its $0/$1/$2, or "
-                     "contains one of $3-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 7,
+        "There were 3 substitution arguments given, but "
+        "this format string is missing its $0/$1/$2, contains one of "
+        "$3-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(std::string* output, const char* format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1,
                          const substitute_internal::Arg& a2,
                          const substitute_internal::Arg& a3)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 15,
-                     "There were 4 substitution arguments given, but "
-                     "this format string is either missing its $0-$3, or "
-                     "contains one of $4-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 15,
+        "There were 4 substitution arguments given, but "
+        "this format string is missing its $0-$3, contains one of "
+        "$4-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(std::string* output, const char* format,
                          const substitute_internal::Arg& a0,
@@ -405,10 +445,11 @@
                          const substitute_internal::Arg& a2,
                          const substitute_internal::Arg& a3,
                          const substitute_internal::Arg& a4)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 31,
-                     "There were 5 substitution arguments given, but "
-                     "this format string is either missing its $0-$4, or "
-                     "contains one of $5-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 31,
+        "There were 5 substitution arguments given, but "
+        "this format string is missing its $0-$4, contains one of "
+        "$5-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(std::string* output, const char* format,
                          const substitute_internal::Arg& a0,
@@ -417,20 +458,22 @@
                          const substitute_internal::Arg& a3,
                          const substitute_internal::Arg& a4,
                          const substitute_internal::Arg& a5)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 63,
-                     "There were 6 substitution arguments given, but "
-                     "this format string is either missing its $0-$5, or "
-                     "contains one of $6-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 63,
+        "There were 6 substitution arguments given, but "
+        "this format string is missing its $0-$5, contains one of "
+        "$6-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
     std::string* output, const char* format, const substitute_internal::Arg& a0,
     const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
     const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 127,
-                     "There were 7 substitution arguments given, but "
-                     "this format string is either missing its $0-$6, or "
-                     "contains one of $7-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 127,
+        "There were 7 substitution arguments given, but "
+        "this format string is missing its $0-$6, contains one of "
+        "$7-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
     std::string* output, const char* format, const substitute_internal::Arg& a0,
@@ -438,10 +481,11 @@
     const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
     const substitute_internal::Arg& a7)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 255,
-                     "There were 8 substitution arguments given, but "
-                     "this format string is either missing its $0-$7, or "
-                     "contains one of $8-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 255,
+        "There were 8 substitution arguments given, but "
+        "this format string is missing its $0-$7, contains one of "
+        "$8-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
     std::string* output, const char* format, const substitute_internal::Arg& a0,
@@ -452,7 +496,8 @@
     ABSL_BAD_CALL_IF(
         substitute_internal::PlaceholderBitmask(format) != 511,
         "There were 9 substitution arguments given, but "
-        "this format string is either missing its $0-$8, or contains a $9");
+        "this format string is missing its $0-$8, contains a $9, or "
+        "contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
     std::string* output, const char* format, const substitute_internal::Arg& a0,
@@ -461,9 +506,11 @@
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
     const substitute_internal::Arg& a7, const substitute_internal::Arg& a8,
     const substitute_internal::Arg& a9)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1023,
-                     "There were 10 substitution arguments given, but this "
-                     "format string doesn't contain all of $0 through $9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 1023,
+        "There were 10 substitution arguments given, but this "
+        "format string either doesn't contain all of $0 through $9 or "
+        "contains an unescaped $ character (use $$ instead)");
 #endif  // ABSL_BAD_CALL_IF
 
 // Substitute()
@@ -589,47 +636,53 @@
 std::string Substitute(const char* format)
     ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 0,
                      "There were no substitution arguments "
-                     "but this format string has a $[0-9] in it");
+                     "but this format string either has a $[0-9] in it or "
+                     "contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1,
-                     "There was 1 substitution argument given, but "
-                     "this format string is either missing its $0, or "
-                     "contains one of $1-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 1,
+        "There was 1 substitution argument given, but "
+        "this format string is missing its $0, contains one of $1-$9, "
+        "or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 3,
-                     "There were 2 substitution arguments given, but "
-                     "this format string is either missing its $0/$1, or "
-                     "contains one of $2-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 3,
+        "There were 2 substitution arguments given, but "
+        "this format string is missing its $0/$1, contains one of "
+        "$2-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 7,
-                     "There were 3 substitution arguments given, but "
-                     "this format string is either missing its $0/$1/$2, or "
-                     "contains one of $3-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 7,
+        "There were 3 substitution arguments given, but "
+        "this format string is missing its $0/$1/$2, contains one of "
+        "$3-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2,
                        const substitute_internal::Arg& a3)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 15,
-                     "There were 4 substitution arguments given, but "
-                     "this format string is either missing its $0-$3, or "
-                     "contains one of $4-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 15,
+        "There were 4 substitution arguments given, but "
+        "this format string is missing its $0-$3, contains one of "
+        "$4-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2,
                        const substitute_internal::Arg& a3,
                        const substitute_internal::Arg& a4)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 31,
-                     "There were 5 substitution arguments given, but "
-                     "this format string is either missing its $0-$4, or "
-                     "contains one of $5-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 31,
+        "There were 5 substitution arguments given, but "
+        "this format string is missing its $0-$4, contains one of "
+        "$5-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
@@ -637,10 +690,11 @@
                        const substitute_internal::Arg& a3,
                        const substitute_internal::Arg& a4,
                        const substitute_internal::Arg& a5)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 63,
-                     "There were 6 substitution arguments given, but "
-                     "this format string is either missing its $0-$5, or "
-                     "contains one of $6-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 63,
+        "There were 6 substitution arguments given, but "
+        "this format string is missing its $0-$5, contains one of "
+        "$6-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
@@ -649,10 +703,11 @@
                        const substitute_internal::Arg& a4,
                        const substitute_internal::Arg& a5,
                        const substitute_internal::Arg& a6)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 127,
-                     "There were 7 substitution arguments given, but "
-                     "this format string is either missing its $0-$6, or "
-                     "contains one of $7-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 127,
+        "There were 7 substitution arguments given, but "
+        "this format string is missing its $0-$6, contains one of "
+        "$7-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(const char* format, const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
@@ -662,10 +717,11 @@
                        const substitute_internal::Arg& a5,
                        const substitute_internal::Arg& a6,
                        const substitute_internal::Arg& a7)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 255,
-                     "There were 8 substitution arguments given, but "
-                     "this format string is either missing its $0-$7, or "
-                     "contains one of $8-$9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 255,
+        "There were 8 substitution arguments given, but "
+        "this format string is missing its $0-$7, contains one of "
+        "$8-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(
     const char* format, const substitute_internal::Arg& a0,
@@ -676,7 +732,8 @@
     ABSL_BAD_CALL_IF(
         substitute_internal::PlaceholderBitmask(format) != 511,
         "There were 9 substitution arguments given, but "
-        "this format string is either missing its $0-$8, or contains a $9");
+        "this format string is missing its $0-$8, contains a $9, or "
+        "contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(
     const char* format, const substitute_internal::Arg& a0,
@@ -685,9 +742,11 @@
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
     const substitute_internal::Arg& a7, const substitute_internal::Arg& a8,
     const substitute_internal::Arg& a9)
-    ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1023,
-                     "There were 10 substitution arguments given, but this "
-                     "format string doesn't contain all of $0 through $9");
+    ABSL_BAD_CALL_IF(
+        substitute_internal::PlaceholderBitmask(format) != 1023,
+        "There were 10 substitution arguments given, but this "
+        "format string either doesn't contain all of $0 through $9 or "
+        "contains an unescaped $ character (use $$ instead)");
 #endif  // ABSL_BAD_CALL_IF
 
 ABSL_NAMESPACE_END
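The two new `Arg` overloads above cover types that Substitute previously rejected: scoped enums are forwarded to the integer formatters via `std::underlying_type`, and any type with an `AbslStringify()` extension is rendered through a `StringifySink`. A minimal sketch of what callers can now write (the `Color` and `Point` types and the `Demo()` function are invented for illustration, not part of this change):

#include <string>

#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"

enum class Color : int { kRed = 1, kBlue = 2 };  // scoped enum: printed as its underlying int

struct Point {
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    sink.Append(absl::StrCat("(", p.x, ", ", p.y, ")"));
  }
  int x, y;
};

std::string Demo() {
  // Returns "color=2 at (3, 4); price: $5"; "$$" escapes a literal '$'.
  return absl::Substitute("color=$0 at $1; price: $$$2",
                          Color::kBlue, Point{3, 4}, 5);
}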
diff --git a/abseil-cpp/absl/strings/substitute_test.cc b/abseil-cpp/absl/strings/substitute_test.cc
index 442c921..ecf78d6 100644
--- a/abseil-cpp/absl/strings/substitute_test.cc
+++ b/abseil-cpp/absl/strings/substitute_test.cc
@@ -22,6 +22,16 @@
 
 namespace {
 
+struct MyStruct {
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, const MyStruct& s) {
+    sink.Append("MyStruct{.value = ");
+    sink.Append(absl::StrCat(s.value));
+    sink.Append("}");
+  }
+  int value;
+};
+
 TEST(SubstituteTest, Substitute) {
   // Basic.
   EXPECT_EQ("Hello, world!", absl::Substitute("$0, $1!", "Hello", "world"));
@@ -70,7 +80,7 @@
   // Volatile Pointer.
   // Like C++ streamed I/O, such pointers implicitly become bool
   volatile int vol = 237;
-  volatile int *volatile volptr = &vol;
+  volatile int* volatile volptr = &vol;
   str = absl::Substitute("$0", volptr);
   EXPECT_EQ("true", str);
 
@@ -128,6 +138,11 @@
 
   const char* null_cstring = nullptr;
   EXPECT_EQ("Text: ''", absl::Substitute("Text: '$0'", null_cstring));
+
+  MyStruct s1 = MyStruct{17};
+  MyStruct s2 = MyStruct{1043};
+  EXPECT_EQ("MyStruct{.value = 17}, MyStruct{.value = 1043}",
+            absl::Substitute("$0, $1", s1, s2));
 }
 
 TEST(SubstituteTest, SubstituteAndAppend) {
@@ -171,6 +186,12 @@
   absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5 $6 $7 $8 $9", "a", "b",
                             "c", "d", "e", "f", "g", "h", "i", "j");
   EXPECT_EQ("a b c d e f g h i j", str);
+
+  str.clear();
+  MyStruct s1 = MyStruct{17};
+  MyStruct s2 = MyStruct{1043};
+  absl::SubstituteAndAppend(&str, "$0, $1", s1, s2);
+  EXPECT_EQ("MyStruct{.value = 17}, MyStruct{.value = 1043}", str);
 }
 
 TEST(SubstituteTest, VectorBoolRef) {
@@ -184,7 +205,67 @@
   EXPECT_EQ("Logic be like: true false true false", str);
 }
 
-#ifdef GTEST_HAS_DEATH_TEST
+TEST(SubstituteTest, Enums) {
+  enum UnscopedEnum { kEnum0 = 0, kEnum1 = 1 };
+  EXPECT_EQ("0 1", absl::Substitute("$0 $1", UnscopedEnum::kEnum0,
+                                    UnscopedEnum::kEnum1));
+
+  enum class ScopedEnum { kEnum0 = 0, kEnum1 = 1 };
+  EXPECT_EQ("0 1",
+            absl::Substitute("$0 $1", ScopedEnum::kEnum0, ScopedEnum::kEnum1));
+
+  enum class ScopedEnumInt32 : int32_t { kEnum0 = 989, kEnum1 = INT32_MIN };
+  EXPECT_EQ("989 -2147483648",
+            absl::Substitute("$0 $1", ScopedEnumInt32::kEnum0,
+                             ScopedEnumInt32::kEnum1));
+
+  enum class ScopedEnumUInt32 : uint32_t { kEnum0 = 1, kEnum1 = UINT32_MAX };
+  EXPECT_EQ("1 4294967295", absl::Substitute("$0 $1", ScopedEnumUInt32::kEnum0,
+                                             ScopedEnumUInt32::kEnum1));
+
+  enum class ScopedEnumInt64 : int64_t { kEnum0 = -1, kEnum1 = 42949672950 };
+  EXPECT_EQ("-1 42949672950", absl::Substitute("$0 $1", ScopedEnumInt64::kEnum0,
+                                               ScopedEnumInt64::kEnum1));
+
+  enum class ScopedEnumUInt64 : uint64_t { kEnum0 = 1, kEnum1 = 42949672950 };
+  EXPECT_EQ("1 42949672950", absl::Substitute("$0 $1", ScopedEnumUInt64::kEnum0,
+                                              ScopedEnumUInt64::kEnum1));
+
+  enum class ScopedEnumChar : signed char { kEnum0 = -1, kEnum1 = 1 };
+  EXPECT_EQ("-1 1", absl::Substitute("$0 $1", ScopedEnumChar::kEnum0,
+                                     ScopedEnumChar::kEnum1));
+
+  enum class ScopedEnumUChar : unsigned char {
+    kEnum0 = 0,
+    kEnum1 = 1,
+    kEnumMax = 255
+  };
+  EXPECT_EQ("0 1 255", absl::Substitute("$0 $1 $2", ScopedEnumUChar::kEnum0,
+                                        ScopedEnumUChar::kEnum1,
+                                        ScopedEnumUChar::kEnumMax));
+
+  enum class ScopedEnumInt16 : int16_t { kEnum0 = -100, kEnum1 = 10000 };
+  EXPECT_EQ("-100 10000", absl::Substitute("$0 $1", ScopedEnumInt16::kEnum0,
+                                           ScopedEnumInt16::kEnum1));
+
+  enum class ScopedEnumUInt16 : uint16_t { kEnum0 = 0, kEnum1 = 10000 };
+  EXPECT_EQ("0 10000", absl::Substitute("$0 $1", ScopedEnumUInt16::kEnum0,
+                                        ScopedEnumUInt16::kEnum1));
+}
+
+enum class EnumWithStringify { Many = 0, Choices = 1 };
+
+template <typename Sink>
+void AbslStringify(Sink& sink, EnumWithStringify e) {
+  sink.Append(e == EnumWithStringify::Many ? "Many" : "Choices");
+}
+
+TEST(SubstituteTest, AbslStringifyWithEnum) {
+  const auto e = EnumWithStringify::Choices;
+  EXPECT_EQ(absl::Substitute("$0", e), "Choices");
+}
+
+#if GTEST_HAS_DEATH_TEST
 
 TEST(SubstituteDeathTest, SubstituteDeath) {
   EXPECT_DEBUG_DEATH(
diff --git a/abseil-cpp/absl/synchronization/BUILD.bazel b/abseil-cpp/absl/synchronization/BUILD.bazel
index 4d4d680..0ca94e0 100644
--- a/abseil-cpp/absl/synchronization/BUILD.bazel
+++ b/abseil-cpp/absl/synchronization/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -22,7 +21,7 @@
     "ABSL_TEST_COPTS",
 )
 
-package(default_visibility = ["//visibility:public"])
+package(default_visibility = ["//visibility:private"])
 
 licenses(["notice"])
 
@@ -35,11 +34,10 @@
     hdrs = [
         "internal/graphcycles.h",
     ],
-    copts = ABSL_DEFAULT_COPTS,
+    copts = ABSL_DEFAULT_COPTS + select({
+        "//conditions:default": [],
+    }),
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
     deps = [
         "//absl/base",
         "//absl/base:base_internal",
@@ -52,47 +50,76 @@
 
 cc_library(
     name = "kernel_timeout_internal",
+    srcs = ["internal/kernel_timeout.cc"],
     hdrs = ["internal/kernel_timeout.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = [
-        "//absl/synchronization:__pkg__",
     ],
     deps = [
+        "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/time",
     ],
 )
 
+cc_test(
+    name = "kernel_timeout_internal_test",
+    srcs = ["internal/kernel_timeout_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    flaky = 1,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":kernel_timeout_internal",
+        "//absl/base:config",
+        "//absl/random",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "synchronization",
     srcs = [
         "barrier.cc",
         "blocking_counter.cc",
         "internal/create_thread_identity.cc",
+        "internal/futex_waiter.cc",
         "internal/per_thread_sem.cc",
-        "internal/waiter.cc",
+        "internal/pthread_waiter.cc",
+        "internal/sem_waiter.cc",
+        "internal/stdcpp_waiter.cc",
+        "internal/waiter_base.cc",
+        "internal/win32_waiter.cc",
+        "mutex.cc",
         "notification.cc",
-    ] + select({
-        "//conditions:default": ["mutex.cc"],
-    }),
+    ],
     hdrs = [
         "barrier.h",
         "blocking_counter.h",
         "internal/create_thread_identity.h",
-        "internal/mutex_nonprod.inc",
+        "internal/futex.h",
+        "internal/futex_waiter.h",
         "internal/per_thread_sem.h",
+        "internal/pthread_waiter.h",
+        "internal/sem_waiter.h",
+        "internal/stdcpp_waiter.h",
         "internal/waiter.h",
+        "internal/waiter_base.h",
+        "internal/win32_waiter.h",
         "mutex.h",
         "notification.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
+    visibility = ["//visibility:public"],
     deps = [
         ":graphcycles_internal",
         ":kernel_timeout_internal",
@@ -107,7 +134,9 @@
         "//absl/debugging:stacktrace",
         "//absl/debugging:symbolize",
         "//absl/time",
-    ],
+    ] + select({
+        "//conditions:default": [],
+    }),
 )
 
 cc_test(
@@ -116,6 +145,9 @@
     srcs = ["barrier_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",  # b/122473323
+    ],
     deps = [
         ":synchronization",
         "//absl/time",
@@ -129,6 +161,9 @@
     srcs = ["blocking_counter_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",  # b/122473323
+    ],
     deps = [
         ":synchronization",
         "//absl/time",
@@ -136,6 +171,20 @@
     ],
 )
 
+cc_binary(
+    name = "blocking_counter_benchmark",
+    testonly = 1,
+    srcs = ["blocking_counter_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    deps = [
+        ":synchronization",
+        ":thread_pool",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
 cc_test(
     name = "graphcycles_test",
     size = "medium",
@@ -145,7 +194,8 @@
     deps = [
         ":graphcycles_internal",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/log:check",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -176,6 +226,7 @@
     deps = [
         ":synchronization",
         "//absl/base:core_headers",
+        "//absl/functional:any_invocable",
     ],
 )
 
@@ -184,6 +235,7 @@
     size = "large",
     srcs = ["mutex_test.cc"],
     copts = ABSL_TEST_COPTS,
+    flaky = 1,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     shard_count = 25,
     deps = [
@@ -192,13 +244,26 @@
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
+        "//absl/log:check",
         "//absl/memory",
         "//absl/time",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
+cc_test(
+    name = "mutex_method_pointer_test",
+    srcs = ["mutex_method_pointer_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":synchronization",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "mutex_benchmark_common",
     testonly = 1,
@@ -206,7 +271,6 @@
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = [
-        "//absl/synchronization:__pkg__",
     ],
     deps = [
         ":synchronization",
@@ -223,7 +287,6 @@
     testonly = 1,
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
     deps = [
         ":mutex_benchmark_common",
     ],
@@ -234,7 +297,9 @@
     size = "small",
     srcs = ["notification_test.cc"],
     copts = ABSL_TEST_COPTS,
+    flaky = 1,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["no_test_lexan"],
     deps = [
         ":synchronization",
         "//absl/time",
@@ -248,6 +313,8 @@
     srcs = ["internal/per_thread_sem_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+    ],
     deps = [
         ":synchronization",
         "//absl/base",
@@ -261,9 +328,12 @@
 
 cc_test(
     name = "per_thread_sem_test",
-    size = "medium",
+    size = "large",
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
     deps = [
         ":per_thread_sem_test_common",
         ":synchronization",
@@ -274,16 +344,36 @@
 )
 
 cc_test(
+    name = "waiter_test",
+    srcs = ["internal/waiter_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    flaky = 1,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":kernel_timeout_internal",
+        ":synchronization",
+        ":thread_pool",
+        "//absl/base:config",
+        "//absl/random",
+        "//absl/time",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "lifetime_test",
     srcs = [
         "lifetime_test.cc",
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = ["no_test_ios_x86_64"],
+    tags = [
+        "no_test_ios_x86_64",
+        "no_test_wasm",
+    ],
     deps = [
         ":synchronization",
         "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
+        "//absl/log:check",
     ],
 )
diff --git a/abseil-cpp/absl/synchronization/CMakeLists.txt b/abseil-cpp/absl/synchronization/CMakeLists.txt
index e5bc52f..a0f64e5 100644
--- a/abseil-cpp/absl/synchronization/CMakeLists.txt
+++ b/abseil-cpp/absl/synchronization/CMakeLists.txt
@@ -14,6 +14,7 @@
 # limitations under the License.
 #
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     graphcycles_internal
@@ -32,19 +33,39 @@
     absl::raw_logging_internal
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     kernel_timeout_internal
   HDRS
     "internal/kernel_timeout.h"
+  SRCS
+    "internal/kernel_timeout.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::base
+    absl::config
     absl::core_headers
     absl::raw_logging_internal
     absl::time
 )
 
+absl_cc_test(
+  NAME
+    kernel_timeout_internal_test
+  SRCS
+    "internal/kernel_timeout_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::kernel_timeout_internal
+    absl::config
+    absl::random_random
+    absl::time
+    GTest::gmock_main
+)
+
 absl_cc_library(
   NAME
     synchronization
@@ -52,17 +73,28 @@
     "barrier.h"
     "blocking_counter.h"
     "internal/create_thread_identity.h"
-    "internal/mutex_nonprod.inc"
+    "internal/futex.h"
+    "internal/futex_waiter.h"
     "internal/per_thread_sem.h"
+    "internal/pthread_waiter.h"
+    "internal/sem_waiter.h"
+    "internal/stdcpp_waiter.h"
     "internal/waiter.h"
+    "internal/waiter_base.h"
+    "internal/win32_waiter.h"
     "mutex.h"
     "notification.h"
   SRCS
     "barrier.cc"
     "blocking_counter.cc"
     "internal/create_thread_identity.cc"
+    "internal/futex_waiter.cc"
     "internal/per_thread_sem.cc"
-    "internal/waiter.cc"
+    "internal/pthread_waiter.cc"
+    "internal/sem_waiter.cc"
+    "internal/stdcpp_waiter.cc"
+    "internal/waiter_base.cc"
+    "internal/win32_waiter.cc"
     "notification.cc"
     "mutex.cc"
   COPTS
@@ -95,7 +127,7 @@
   DEPS
     absl::synchronization
     absl::time
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -108,7 +140,7 @@
   DEPS
     absl::synchronization
     absl::time
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -119,12 +151,14 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
-    absl::graphcycles_internal
+    absl::check
     absl::core_headers
-    absl::raw_logging_internal
-    gmock_main
+    absl::graphcycles_internal
+    absl::log
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     thread_pool
@@ -133,8 +167,9 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
-    absl::synchronization
+    absl::any_invocable
     absl::core_headers
+    absl::synchronization
   TESTONLY
 )
 
@@ -149,12 +184,27 @@
     absl::synchronization
     absl::thread_pool
     absl::base
+    absl::check
     absl::config
     absl::core_headers
+    absl::log
     absl::memory
-    absl::raw_logging_internal
     absl::time
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    mutex_method_pointer_test
+  SRCS
+    "mutex_method_pointer_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::synchronization
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -167,9 +217,10 @@
   DEPS
     absl::synchronization
     absl::time
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     per_thread_sem_test_common
@@ -183,7 +234,7 @@
     absl::config
     absl::strings
     absl::time
-    gmock
+    GTest::gmock
   TESTONLY
 )
 
@@ -199,7 +250,24 @@
     absl::synchronization
     absl::strings
     absl::time
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    waiter_test
+  SRCS
+    "internal/waiter_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::kernel_timeout_internal
+    absl::random_random
+    absl::synchronization
+    absl::thread_pool
+    absl::time
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -212,5 +280,5 @@
   DEPS
     absl::synchronization
     absl::core_headers
-    absl::raw_logging_internal
+    absl::check
 )
diff --git a/abseil-cpp/absl/synchronization/blocking_counter.cc b/abseil-cpp/absl/synchronization/blocking_counter.cc
index 3cea7ae..d2f82da 100644
--- a/abseil-cpp/absl/synchronization/blocking_counter.cc
+++ b/abseil-cpp/absl/synchronization/blocking_counter.cc
@@ -14,41 +14,51 @@
 
 #include "absl/synchronization/blocking_counter.h"
 
+#include <atomic>
+
 #include "absl/base/internal/raw_logging.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-// Return whether int *arg is zero.
-static bool IsZero(void *arg) {
-  return 0 == *reinterpret_cast<int *>(arg);
+namespace {
+
+// Return whether the bool pointed to by arg is true.
+bool IsDone(void *arg) { return *reinterpret_cast<bool *>(arg); }
+
+}  // namespace
+
+BlockingCounter::BlockingCounter(int initial_count)
+    : count_(initial_count),
+      num_waiting_(0),
+      done_{initial_count == 0 ? true : false} {
+  ABSL_RAW_CHECK(initial_count >= 0, "BlockingCounter initial_count negative");
 }
 
 bool BlockingCounter::DecrementCount() {
-  MutexLock l(&lock_);
-  count_--;
-  if (count_ < 0) {
-    ABSL_RAW_LOG(
-        FATAL,
-        "BlockingCounter::DecrementCount() called too many times.  count=%d",
-        count_);
+  int count = count_.fetch_sub(1, std::memory_order_acq_rel) - 1;
+  ABSL_RAW_CHECK(count >= 0,
+                 "BlockingCounter::DecrementCount() called too many times");
+  if (count == 0) {
+    MutexLock l(&lock_);
+    done_ = true;
+    return true;
   }
-  return count_ == 0;
+  return false;
 }
 
 void BlockingCounter::Wait() {
   MutexLock l(&this->lock_);
-  ABSL_RAW_CHECK(count_ >= 0, "BlockingCounter underflow");
 
   // only one thread may call Wait(). To support more than one thread,
   // implement a counter num_to_exit, like in the Barrier class.
   ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
   num_waiting_++;
 
-  this->lock_.Await(Condition(IsZero, &this->count_));
+  this->lock_.Await(Condition(IsDone, &this->done_));
 
-  // At this point, We know that all threads executing DecrementCount have
-  // released the lock, and so will not touch this object again.
+  // At this point, we know that all threads executing DecrementCount
+  // will not touch this object again.
   // Therefore, the thread calling this method is free to delete the object
   // after we return from this method.
 }
diff --git a/abseil-cpp/absl/synchronization/blocking_counter.h b/abseil-cpp/absl/synchronization/blocking_counter.h
index 1f53f9f..1908fdb 100644
--- a/abseil-cpp/absl/synchronization/blocking_counter.h
+++ b/abseil-cpp/absl/synchronization/blocking_counter.h
@@ -20,6 +20,8 @@
 #ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
 #define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
 
+#include <atomic>
+
 #include "absl/base/thread_annotations.h"
 #include "absl/synchronization/mutex.h"
 
@@ -60,8 +62,7 @@
 //
 class BlockingCounter {
  public:
-  explicit BlockingCounter(int initial_count)
-      : count_(initial_count), num_waiting_(0) {}
+  explicit BlockingCounter(int initial_count);
 
   BlockingCounter(const BlockingCounter&) = delete;
   BlockingCounter& operator=(const BlockingCounter&) = delete;
@@ -89,8 +90,9 @@
 
  private:
   Mutex lock_;
-  int count_ ABSL_GUARDED_BY(lock_);
+  std::atomic<int> count_;
   int num_waiting_ ABSL_GUARDED_BY(lock_);
+  bool done_ ABSL_GUARDED_BY(lock_);
 };
 
 ABSL_NAMESPACE_END
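The header change above moves `count_` to a `std::atomic<int>` and adds a `done_` flag so that, as implemented in blocking_counter.cc, only the final `DecrementCount()` call and `Wait()` ever take the mutex. A minimal usage sketch (thread count and work body are invented for illustration):

#include <thread>
#include <vector>

#include "absl/synchronization/blocking_counter.h"

void RunAndWaitForWorkers() {
  constexpr int kNumWorkers = 4;
  absl::BlockingCounter done(kNumWorkers);
  std::vector<std::thread> workers;
  for (int i = 0; i < kNumWorkers; ++i) {
    workers.emplace_back([&done] {
      // ... do some work ...
      done.DecrementCount();  // the final call returns true and unblocks Wait()
    });
  }
  done.Wait();  // blocks until the count reaches zero
  for (std::thread& t : workers) t.join();
}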
diff --git a/abseil-cpp/absl/synchronization/blocking_counter_benchmark.cc b/abseil-cpp/absl/synchronization/blocking_counter_benchmark.cc
new file mode 100644
index 0000000..b504d1a
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/blocking_counter_benchmark.cc
@@ -0,0 +1,83 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <limits>
+
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+void BM_BlockingCounter_SingleThread(benchmark::State& state) {
+  for (auto _ : state) {
+    int iterations = state.range(0);
+    absl::BlockingCounter counter{iterations};
+    for (int i = 0; i < iterations; ++i) {
+      counter.DecrementCount();
+    }
+    counter.Wait();
+  }
+}
+BENCHMARK(BM_BlockingCounter_SingleThread)
+    ->ArgName("iterations")
+    ->Arg(2)
+    ->Arg(4)
+    ->Arg(16)
+    ->Arg(64)
+    ->Arg(256);
+
+void BM_BlockingCounter_DecrementCount(benchmark::State& state) {
+  static absl::BlockingCounter* counter =
+      new absl::BlockingCounter{std::numeric_limits<int>::max()};
+  for (auto _ : state) {
+    counter->DecrementCount();
+  }
+}
+BENCHMARK(BM_BlockingCounter_DecrementCount)
+    ->Threads(2)
+    ->Threads(4)
+    ->Threads(6)
+    ->Threads(8)
+    ->Threads(10)
+    ->Threads(12)
+    ->Threads(16)
+    ->Threads(32)
+    ->Threads(64)
+    ->Threads(128);
+
+void BM_BlockingCounter_Wait(benchmark::State& state) {
+  int num_threads = state.range(0);
+  absl::synchronization_internal::ThreadPool pool(num_threads);
+  for (auto _ : state) {
+    absl::BlockingCounter counter{num_threads};
+    pool.Schedule([num_threads, &counter, &pool]() {
+      for (int i = 0; i < num_threads; ++i) {
+        pool.Schedule([&counter]() { counter.DecrementCount(); });
+      }
+    });
+    counter.Wait();
+  }
+}
+BENCHMARK(BM_BlockingCounter_Wait)
+    ->ArgName("threads")
+    ->Arg(2)
+    ->Arg(4)
+    ->Arg(8)
+    ->Arg(16)
+    ->Arg(32)
+    ->Arg(64)
+    ->Arg(128);
+
+}  // namespace
diff --git a/abseil-cpp/absl/synchronization/blocking_counter_test.cc b/abseil-cpp/absl/synchronization/blocking_counter_test.cc
index 2926224..06885f5 100644
--- a/abseil-cpp/absl/synchronization/blocking_counter_test.cc
+++ b/abseil-cpp/absl/synchronization/blocking_counter_test.cc
@@ -63,6 +63,18 @@
   }
 }
 
+TEST(BlockingCounterTest, WaitZeroInitialCount) {
+  BlockingCounter counter(0);
+  counter.Wait();
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(BlockingCounterTest, WaitNegativeInitialCount) {
+  EXPECT_DEATH(BlockingCounter counter(-1),
+               "BlockingCounter initial_count negative");
+}
+#endif
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc b/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
index 53a71b3..eacaa28 100644
--- a/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
+++ b/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
@@ -13,10 +13,12 @@
 // limitations under the License.
 
 #include <stdint.h>
+
 #include <new>
 
 // This file is a no-op if the required LowLevelAlloc support is missing.
 #include "absl/base/internal/low_level_alloc.h"
+#include "absl/synchronization/internal/waiter.h"
 #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
 
 #include <string.h>
@@ -38,7 +40,7 @@
 
 // A per-thread destructor for reclaiming associated ThreadIdentity objects.
 // Since we must preserve their storage we cache them for re-use.
-void ReclaimThreadIdentity(void* v) {
+static void ReclaimThreadIdentity(void* v) {
   base_internal::ThreadIdentity* identity =
       static_cast<base_internal::ThreadIdentity*>(v);
 
@@ -48,8 +50,6 @@
     base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
   }
 
-  PerThreadSem::Destroy(identity);
-
   // We must explicitly clear the current thread's identity:
   // (a) Subsequent (unrelated) per-thread destructors may require an identity.
   //     We must guarantee a new identity is used in this case (this instructor
@@ -71,7 +71,15 @@
   return (addr + align - 1) & ~(align - 1);
 }
 
-static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
+  PerThreadSem::Init(identity);
+  identity->ticker.store(0, std::memory_order_relaxed);
+  identity->wait_start.store(0, std::memory_order_relaxed);
+  identity->is_idle.store(false, std::memory_order_relaxed);
+}
+
+static void ResetThreadIdentityBetweenReuse(
+    base_internal::ThreadIdentity* identity) {
   base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
   pts->next = nullptr;
   pts->skip = nullptr;
@@ -116,8 +124,9 @@
     identity = reinterpret_cast<base_internal::ThreadIdentity*>(
         RoundUp(reinterpret_cast<intptr_t>(allocation),
                 base_internal::PerThreadSynch::kAlignment));
+    OneTimeInitThreadIdentity(identity);
   }
-  ResetThreadIdentity(identity);
+  ResetThreadIdentityBetweenReuse(identity);
 
   return identity;
 }
@@ -127,7 +136,6 @@
 // REQUIRES: CurrentThreadIdentity(false) == nullptr
 base_internal::ThreadIdentity* CreateThreadIdentity() {
   base_internal::ThreadIdentity* identity = NewThreadIdentity();
-  PerThreadSem::Init(identity);
   // Associate the value with the current thread, and attach our destructor.
   base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
   return identity;
diff --git a/abseil-cpp/absl/synchronization/internal/create_thread_identity.h b/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
index e121f68..4cfde09 100644
--- a/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
+++ b/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
@@ -36,10 +36,6 @@
 // For private use only.
 base_internal::ThreadIdentity* CreateThreadIdentity();
 
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// For private use only.
-void ReclaimThreadIdentity(void* v);
-
 // Returns the ThreadIdentity object representing the calling thread; guaranteed
 // to be unique for its lifetime.  The returned object will remain valid for the
 // program's lifetime; although it may be re-assigned to a subsequent thread.
diff --git a/abseil-cpp/absl/synchronization/internal/futex.h b/abseil-cpp/absl/synchronization/internal/futex.h
new file mode 100644
index 0000000..573c01b
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/futex.h
@@ -0,0 +1,177 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+
+#include "absl/base/config.h"
+
+#ifndef _WIN32
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cstdint>
+#include <limits>
+
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
+#elif defined(__BIONIC__)
+// Bionic supports all the futex operations we need even when some of the futex
+// definitions are missing.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
+// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET 9
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+using FutexTimespec = struct timespec;
+#else
+// Some libc implementations have switched to an unconditional 64-bit `time_t`
+// definition. This means that `struct timespec` may not match the layout
+// expected by the kernel ABI on 32-bit platforms. So we define the
+// FutexTimespec that matches the kernel timespec definition. It should be safe
+// to use this struct for 64-bit userspace builds too, since it will use another
+// SYS_futex kernel call with 64-bit tv_sec inside timespec.
+struct FutexTimespec {
+  long tv_sec;   // NOLINT
+  long tv_nsec;  // NOLINT
+};
+#endif
+
+class FutexImpl {
+ public:
+  // Atomically check that `*v == val`, and if it is, then sleep until woken
+  // by `Wake()`.
+  static int Wait(std::atomic<int32_t>* v, int32_t val) {
+    return WaitAbsoluteTimeout(v, val, nullptr);
+  }
+
+  // Atomically check that `*v == val`, and if it is, then sleep until
+  // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`.
+  static int WaitAbsoluteTimeout(std::atomic<int32_t>* v, int32_t val,
+                                 const struct timespec* abs_timeout) {
+    FutexTimespec ts;
+    // https://locklessinc.com/articles/futex_cheat_sheet/
+    // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+    auto err = syscall(
+        SYS_futex, reinterpret_cast<int32_t*>(v),
+        FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+        ToFutexTimespec(abs_timeout, &ts), nullptr, FUTEX_BITSET_MATCH_ANY);
+    if (err != 0) {
+      return -errno;
+    }
+    return 0;
+  }
+
+  // Atomically check that `*v == val`, and if it is, then sleep until
+  // `*rel_timeout` has elapsed, or until woken by `Wake()`.
+  static int WaitRelativeTimeout(std::atomic<int32_t>* v, int32_t val,
+                                 const struct timespec* rel_timeout) {
+    FutexTimespec ts;
+    // Atomically check that the futex value is still `val`, and if it
+    // is, sleep until `rel_timeout` elapses or until woken by FUTEX_WAKE.
+    auto err =
+        syscall(SYS_futex, reinterpret_cast<int32_t*>(v), FUTEX_PRIVATE_FLAG,
+                val, ToFutexTimespec(rel_timeout, &ts));
+    if (err != 0) {
+      return -errno;
+    }
+    return 0;
+  }
+
+  // Wakes at most `count` waiters that have entered the sleep state on `v`.
+  static int Wake(std::atomic<int32_t>* v, int32_t count) {
+    auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+    if (ABSL_PREDICT_FALSE(err < 0)) {
+      return -errno;
+    }
+    return 0;
+  }
+
+ private:
+  static FutexTimespec* ToFutexTimespec(const struct timespec* userspace_ts,
+                                        FutexTimespec* futex_ts) {
+    if (userspace_ts == nullptr) {
+      return nullptr;
+    }
+
+    using FutexSeconds = decltype(futex_ts->tv_sec);
+    using FutexNanoseconds = decltype(futex_ts->tv_nsec);
+
+    constexpr auto kMaxSeconds{(std::numeric_limits<FutexSeconds>::max)()};
+    if (userspace_ts->tv_sec > kMaxSeconds) {
+      futex_ts->tv_sec = kMaxSeconds;
+    } else {
+      futex_ts->tv_sec = static_cast<FutexSeconds>(userspace_ts->tv_sec);
+    }
+    futex_ts->tv_nsec = static_cast<FutexNanoseconds>(userspace_ts->tv_nsec);
+    return futex_ts;
+  }
+};
+
+class Futex : public FutexImpl {};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
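`FutexImpl` is an internal helper, but the contract it wraps is the standard futex one: the kernel puts the caller to sleep only if the futex word still equals the expected value, so a wake that races with the check cannot be lost. A rough sketch of that contract using the class above (internal API; the `flag`, `BlockWhileZero`, and `SetAndWake` names are invented for illustration, and real code goes through the waiter classes introduced below):

#include <atomic>
#include <cstdint>

#include "absl/synchronization/internal/futex.h"

std::atomic<int32_t> flag{0};

void BlockWhileZero() {
  using absl::synchronization_internal::Futex;
  while (flag.load(std::memory_order_acquire) == 0) {
    // Sleeps only if the word is still 0 at the time of the syscall; if it
    // changed in between, the call returns immediately and the loop rechecks.
    Futex::Wait(&flag, 0);
  }
}

void SetAndWake() {
  using absl::synchronization_internal::Futex;
  flag.store(1, std::memory_order_release);
  Futex::Wake(&flag, 1);  // wake at most one thread sleeping on `flag`
}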
diff --git a/abseil-cpp/absl/synchronization/internal/futex_waiter.cc b/abseil-cpp/absl/synchronization/internal/futex_waiter.cc
new file mode 100644
index 0000000..87eb3b2
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/futex_waiter.cc
@@ -0,0 +1,111 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/futex_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX_WAITER
+
+#include <atomic>
+#include <cstdint>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char FutexWaiter::kName[];
+#endif
+
+int FutexWaiter::WaitUntil(std::atomic<int32_t>* v, int32_t val,
+                           KernelTimeout t) {
+#ifdef CLOCK_MONOTONIC
+  constexpr bool kHasClockMonotonic = true;
+#else
+  constexpr bool kHasClockMonotonic = false;
+#endif
+
+  // We can't call Futex::WaitUntil() here because the prodkernel implementation
+  // does not know about KernelTimeout::SupportsSteadyClock().
+  if (!t.has_timeout()) {
+    return Futex::Wait(v, val);
+  } else if (kHasClockMonotonic && KernelTimeout::SupportsSteadyClock() &&
+             t.is_relative_timeout()) {
+    auto rel_timespec = t.MakeRelativeTimespec();
+    return Futex::WaitRelativeTimeout(v, val, &rel_timespec);
+  } else {
+    auto abs_timespec = t.MakeAbsTimespec();
+    return Futex::WaitAbsoluteTimeout(v, val, &abs_timespec);
+  }
+}
+
+bool FutexWaiter::Wait(KernelTimeout t) {
+  // Loop until we can atomically decrement futex from a positive
+  // value, waiting on a futex while we believe it is zero.
+  // Note that, since the thread ticker is just reset, we don't need to check
+  // whether the thread is idle on the very first pass of the loop.
+  bool first_pass = true;
+  while (true) {
+    int32_t x = futex_.load(std::memory_order_relaxed);
+    while (x != 0) {
+      if (!futex_.compare_exchange_weak(x, x - 1,
+                                        std::memory_order_acquire,
+                                        std::memory_order_relaxed)) {
+        continue;  // Raced with someone, retry.
+      }
+      return true;  // Consumed a wakeup, we are done.
+    }
+
+    if (!first_pass) MaybeBecomeIdle();
+    const int err = WaitUntil(&futex_, 0, t);
+    if (err != 0) {
+      if (err == -EINTR || err == -EWOULDBLOCK) {
+        // Do nothing, the loop will retry.
+      } else if (err == -ETIMEDOUT) {
+        return false;
+      } else {
+        ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+      }
+    }
+    first_pass = false;
+  }
+}
+
+void FutexWaiter::Post() {
+  if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+    // We incremented from 0, need to wake a potential waiter.
+    Poke();
+  }
+}
+
+void FutexWaiter::Poke() {
+  // Wake one thread waiting on the futex.
+  const int err = Futex::Wake(&futex_, 1);
+  if (ABSL_PREDICT_FALSE(err < 0)) {
+    ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+  }
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_FUTEX_WAITER
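The file above implements a counting semaphore: futex_ holds the number of unconsumed wakeups, Wait() claims one by CAS-decrementing a positive count and otherwise sleeps while the word is zero, and Post() issues a kernel wake only on the 0 -> 1 transition, since that is the only point at which a waiter can be asleep. A portable sketch of the same protocol on C++20 std::atomic wait/notify (futex-backed on Linux), which omits the timeout handling and MaybeBecomeIdle() accounting:

// Portable, simplified re-statement of the FutexWaiter wakeup-counting
// protocol using C++20 std::atomic wait/notify. Illustration only.
#include <atomic>
#include <cstdint>

class CountingWaiter {
 public:
  // Block until one pending wakeup can be consumed.
  void Wait() {
    while (true) {
      int32_t x = count_.load(std::memory_order_relaxed);
      while (x != 0) {
        // Claim one wakeup by decrementing a positive count.
        if (count_.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
          return;
        }
      }
      count_.wait(0, std::memory_order_relaxed);  // Sleep while count == 0.
    }
  }

  // Publish one wakeup. Only the 0 -> 1 transition needs a notify, because
  // waiters only go to sleep after observing a zero count.
  void Post() {
    if (count_.fetch_add(1, std::memory_order_release) == 0) {
      count_.notify_one();
    }
  }

 private:
  std::atomic<int32_t> count_{0};
};

One Post() per published item and one Wait() per consumed item gives counting-semaphore behavior.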
diff --git a/abseil-cpp/absl/synchronization/internal/futex_waiter.h b/abseil-cpp/absl/synchronization/internal/futex_waiter.h
new file mode 100644
index 0000000..11dfa93
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/futex_waiter.h
@@ -0,0 +1,63 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_FUTEX_WAITER 1
+
+class FutexWaiter : public WaiterCrtp<FutexWaiter> {
+ public:
+  FutexWaiter() : futex_(0) {}
+
+  bool Wait(KernelTimeout t);
+  void Post();
+  void Poke();
+
+  static constexpr char kName[] = "FutexWaiter";
+
+ private:
+  // Atomically check that `*v == val`, and if it is, then sleep until the
+  // timeout `t` has been reached, or until woken by `Wake()`.
+  static int WaitUntil(std::atomic<int32_t>* v, int32_t val,
+                       KernelTimeout t);
+
+  // Futexes are defined by specification to be 32-bits.
+  // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
+  std::atomic<int32_t> futex_;
+  static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
diff --git a/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/abseil-cpp/absl/synchronization/internal/graphcycles.cc
index 19f9aab..39b1848 100644
--- a/abseil-cpp/absl/synchronization/internal/graphcycles.cc
+++ b/abseil-cpp/absl/synchronization/internal/graphcycles.cc
@@ -37,6 +37,8 @@
 
 #include <algorithm>
 #include <array>
+#include <cinttypes>
+#include <limits>
 #include "absl/base/internal/hide_ptr.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
@@ -113,7 +115,7 @@
     if (src->ptr_ == src->space_) {
       // Need to actually copy
       resize(src->size_);
-      std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+      std::copy_n(src->ptr_, src->size_, ptr_);
       src->size_ = 0;
     } else {
       Discard();
@@ -147,7 +149,7 @@
     size_t request = static_cast<size_t>(capacity_) * sizeof(T);
     T* copy = static_cast<T*>(
         base_internal::LowLevelAlloc::AllocWithArena(request, arena));
-    std::copy(ptr_, ptr_ + size_, copy);
+    std::copy_n(ptr_, size_, copy);
     Discard();
     ptr_ = copy;
   }
@@ -180,9 +182,9 @@
     return true;
   }
 
-  void erase(uint32_t v) {
+  void erase(int32_t v) {
     uint32_t i = FindIndex(v);
-    if (static_cast<uint32_t>(table_[i]) == v) {
+    if (table_[i] == v) {
       table_[i] = kDel;
     }
   }
@@ -194,7 +196,7 @@
   for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
   bool Next(int32_t* cursor, int32_t* elem) {
     while (static_cast<uint32_t>(*cursor) < table_.size()) {
-      int32_t v = table_[*cursor];
+      int32_t v = table_[static_cast<uint32_t>(*cursor)];
       (*cursor)++;
       if (v >= 0) {
         *elem = v;
@@ -209,24 +211,26 @@
   Vec<int32_t> table_;
   uint32_t occupied_;     // Count of non-empty slots (includes deleted slots)
 
-  static uint32_t Hash(uint32_t a) { return a * 41; }
+  static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }
 
   // Return index for storing v.  May return an empty index or deleted index
-  int FindIndex(int32_t v) const {
+  uint32_t FindIndex(int32_t v) const {
     // Search starting at hash index.
     const uint32_t mask = table_.size() - 1;
     uint32_t i = Hash(v) & mask;
-    int deleted_index = -1;  // If >= 0, index of first deleted element we see
+    uint32_t deleted_index = 0;  // index of first deleted element we see
+    bool seen_deleted_element = false;
     while (true) {
       int32_t e = table_[i];
       if (v == e) {
         return i;
       } else if (e == kEmpty) {
         // Return any previously encountered deleted slot.
-        return (deleted_index >= 0) ? deleted_index : i;
-      } else if (e == kDel && deleted_index < 0) {
+        return seen_deleted_element ? deleted_index : i;
+      } else if (e == kDel && !seen_deleted_element) {
         // Keep searching since v might be present later.
         deleted_index = i;
+        seen_deleted_element = true;
       }
       i = (i + 1) & mask;  // Linear probing; quadratic is slightly slower.
     }
@@ -267,7 +271,7 @@
 }
 
 inline int32_t NodeIndex(GraphId id) {
-  return static_cast<uint32_t>(id.handle & 0xfffffffful);
+  return static_cast<int32_t>(id.handle);
 }
 
 inline uint32_t NodeVersion(GraphId id) {
@@ -297,7 +301,7 @@
   int32_t Find(void* ptr) {
     auto masked = base_internal::HidePtr(ptr);
     for (int32_t i = table_[Hash(ptr)]; i != -1;) {
-      Node* n = (*nodes_)[i];
+      Node* n = (*nodes_)[static_cast<uint32_t>(i)];
       if (n->masked_ptr == masked) return i;
       i = n->next_hash;
     }
@@ -306,7 +310,7 @@
 
   void Add(void* ptr, int32_t i) {
     int32_t* head = &table_[Hash(ptr)];
-    (*nodes_)[i]->next_hash = *head;
+    (*nodes_)[static_cast<uint32_t>(i)]->next_hash = *head;
     *head = i;
   }
 
@@ -316,7 +320,7 @@
     auto masked = base_internal::HidePtr(ptr);
     for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
       int32_t index = *slot;
-      Node* n = (*nodes_)[index];
+      Node* n = (*nodes_)[static_cast<uint32_t>(index)];
       if (n->masked_ptr == masked) {
         *slot = n->next_hash;  // Remove n from linked list
         n->next_hash = -1;
@@ -357,7 +361,7 @@
 };
 
 static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
-  Node* n = rep->nodes_[NodeIndex(id)];
+  Node* n = rep->nodes_[static_cast<uint32_t>(NodeIndex(id))];
   return (n->version == NodeVersion(id)) ? n : nullptr;
 }
 
@@ -383,19 +387,22 @@
     Node* nx = r->nodes_[x];
     void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
     if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
-      ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+      ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %" PRIu32 " %p",
+                   x, ptr);
     }
     if (nx->visited) {
-      ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+      ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %" PRIu32, x);
     }
     if (!ranks.insert(nx->rank)) {
-      ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+      ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %" PRId32, nx->rank);
     }
     HASH_FOR_EACH(y, nx->out) {
-      Node* ny = r->nodes_[y];
+      Node* ny = r->nodes_[static_cast<uint32_t>(y)];
       if (nx->rank >= ny->rank) {
-        ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
-                     nx->rank, ny->rank);
+        ABSL_RAW_LOG(FATAL,
+                     "Edge %" PRIu32 " ->%" PRId32
+                     " has bad rank assignment %" PRId32 "->%" PRId32,
+                     x, y, nx->rank, ny->rank);
       }
     }
   }
@@ -405,14 +412,14 @@
 GraphId GraphCycles::GetId(void* ptr) {
   int32_t i = rep_->ptrmap_.Find(ptr);
   if (i != -1) {
-    return MakeId(i, rep_->nodes_[i]->version);
+    return MakeId(i, rep_->nodes_[static_cast<uint32_t>(i)]->version);
   } else if (rep_->free_nodes_.empty()) {
     Node* n =
         new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
             Node;
     n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
     n->visited = false;
-    n->rank = rep_->nodes_.size();
+    n->rank = static_cast<int32_t>(rep_->nodes_.size());
     n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
@@ -424,7 +431,7 @@
     // a permutation of [0,rep_->nodes_.size()-1].
     int32_t r = rep_->free_nodes_.back();
     rep_->free_nodes_.pop_back();
-    Node* n = rep_->nodes_[r];
+    Node* n = rep_->nodes_[static_cast<uint32_t>(r)];
     n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
@@ -438,12 +445,12 @@
   if (i == -1) {
     return;
   }
-  Node* x = rep_->nodes_[i];
+  Node* x = rep_->nodes_[static_cast<uint32_t>(i)];
   HASH_FOR_EACH(y, x->out) {
-    rep_->nodes_[y]->in.erase(i);
+    rep_->nodes_[static_cast<uint32_t>(y)]->in.erase(i);
   }
   HASH_FOR_EACH(y, x->in) {
-    rep_->nodes_[y]->out.erase(i);
+    rep_->nodes_[static_cast<uint32_t>(y)]->out.erase(i);
   }
   x->in.clear();
   x->out.clear();
@@ -519,7 +526,7 @@
     // Since we do not call Reorder() on this path, clear any visited
     // markers left by ForwardDFS.
     for (const auto& d : r->deltaf_) {
-      r->nodes_[d]->visited = false;
+      r->nodes_[static_cast<uint32_t>(d)]->visited = false;
     }
     return false;
   }
@@ -537,14 +544,14 @@
   while (!r->stack_.empty()) {
     n = r->stack_.back();
     r->stack_.pop_back();
-    Node* nn = r->nodes_[n];
+    Node* nn = r->nodes_[static_cast<uint32_t>(n)];
     if (nn->visited) continue;
 
     nn->visited = true;
     r->deltaf_.push_back(n);
 
     HASH_FOR_EACH(w, nn->out) {
-      Node* nw = r->nodes_[w];
+      Node* nw = r->nodes_[static_cast<uint32_t>(w)];
       if (nw->rank == upper_bound) {
         return false;  // Cycle
       }
@@ -563,14 +570,14 @@
   while (!r->stack_.empty()) {
     n = r->stack_.back();
     r->stack_.pop_back();
-    Node* nn = r->nodes_[n];
+    Node* nn = r->nodes_[static_cast<uint32_t>(n)];
     if (nn->visited) continue;
 
     nn->visited = true;
     r->deltab_.push_back(n);
 
     HASH_FOR_EACH(w, nn->in) {
-      Node* nw = r->nodes_[w];
+      Node* nw = r->nodes_[static_cast<uint32_t>(w)];
       if (!nw->visited && lower_bound < nw->rank) {
         r->stack_.push_back(w);
       }
@@ -595,7 +602,7 @@
 
   // Assign the ranks in order to the collected list.
   for (uint32_t i = 0; i < r->list_.size(); i++) {
-    r->nodes_[r->list_[i]]->rank = r->merged_[i];
+    r->nodes_[static_cast<uint32_t>(r->list_[i])]->rank = r->merged_[i];
   }
 }
 
@@ -603,7 +610,8 @@
   struct ByRank {
     const Vec<Node*>* nodes;
     bool operator()(int32_t a, int32_t b) const {
-      return (*nodes)[a]->rank < (*nodes)[b]->rank;
+      return (*nodes)[static_cast<uint32_t>(a)]->rank <
+             (*nodes)[static_cast<uint32_t>(b)]->rank;
     }
   };
   ByRank cmp;
@@ -615,8 +623,10 @@
     GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
   for (auto& v : *src) {
     int32_t w = v;
-    v = r->nodes_[w]->rank;         // Replace v entry with its rank
-    r->nodes_[w]->visited = false;  // Prepare for future DFS calls
+    // Replace v entry with its rank
+    v = r->nodes_[static_cast<uint32_t>(w)]->rank;
+    // Prepare for future DFS calls
+    r->nodes_[static_cast<uint32_t>(w)]->visited = false;
     dst->push_back(w);
   }
 }
@@ -646,7 +656,8 @@
     }
 
     if (path_len < max_path_len) {
-      path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+      path[path_len] =
+          MakeId(n, rep_->nodes_[static_cast<uint32_t>(n)]->version);
     }
     path_len++;
     r->stack_.push_back(-1);  // Will remove tentative path entry
@@ -655,7 +666,7 @@
       return path_len;
     }
 
-    HASH_FOR_EACH(w, r->nodes_[n]->out) {
+    HASH_FOR_EACH(w, r->nodes_[static_cast<uint32_t>(n)]->out) {
       if (seen.insert(w)) {
         r->stack_.push_back(w);
       }
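The FindIndex() change in the hunks above replaces the signed `int deleted_index = -1` sentinel with an explicit seen_deleted_element flag so the probe index stays unsigned; the probing rule itself is unchanged. A condensed standalone sketch of that rule (the kEmpty/kDel sentinels, power-of-two table size, and an always-present empty slot are assumptions mirroring the surrounding code, not the NodeSet class itself):

// Linear probing with tombstones, mirroring NodeSet::FindIndex(): return the
// element's slot if present, otherwise the first deleted slot seen (so an
// insert can reuse the tombstone), otherwise the empty slot ending the probe.
#include <cstdint>
#include <vector>

constexpr int32_t kEmpty = -1;  // Slot never used.
constexpr int32_t kDel = -2;    // Slot previously used, now deleted.

uint32_t FindSlot(const std::vector<int32_t>& table, int32_t v) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // Power-of-two size.
  uint32_t i = (static_cast<uint32_t>(v) * 41u) & mask;           // Same multiplicative hash.
  uint32_t deleted_index = 0;
  bool seen_deleted_element = false;
  while (true) {
    int32_t e = table[i];
    if (e == v) {
      return i;                                         // Found the element.
    } else if (e == kEmpty) {
      return seen_deleted_element ? deleted_index : i;  // End of probe chain.
    } else if (e == kDel && !seen_deleted_element) {
      deleted_index = i;                                // Remember first tombstone.
      seen_deleted_element = true;
    }
    i = (i + 1) & mask;                                 // Linear probing.
  }
}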
diff --git a/abseil-cpp/absl/synchronization/internal/graphcycles_test.cc b/abseil-cpp/absl/synchronization/internal/graphcycles_test.cc
index 74eaffe..3c6ef79 100644
--- a/abseil-cpp/absl/synchronization/internal/graphcycles_test.cc
+++ b/abseil-cpp/absl/synchronization/internal/graphcycles_test.cc
@@ -21,8 +21,9 @@
 #include <vector>
 
 #include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -65,51 +66,51 @@
 }
 
 static void PrintEdges(Edges *edges) {
-  ABSL_RAW_LOG(INFO, "EDGES (%zu)", edges->size());
+  LOG(INFO) << "EDGES (" << edges->size() << ")";
   for (const auto &edge : *edges) {
     int a = edge.from;
     int b = edge.to;
-    ABSL_RAW_LOG(INFO, "%d %d", a, b);
+    LOG(INFO) << a << " " << b;
   }
-  ABSL_RAW_LOG(INFO, "---");
+  LOG(INFO) << "---";
 }
 
 static void PrintGCEdges(Nodes *nodes, const IdMap &id, GraphCycles *gc) {
-  ABSL_RAW_LOG(INFO, "GC EDGES");
+  LOG(INFO) << "GC EDGES";
   for (int a : *nodes) {
     for (int b : *nodes) {
       if (gc->HasEdge(Get(id, a), Get(id, b))) {
-        ABSL_RAW_LOG(INFO, "%d %d", a, b);
+        LOG(INFO) << a << " " << b;
       }
     }
   }
-  ABSL_RAW_LOG(INFO, "---");
+  LOG(INFO) << "---";
 }
 
 static void PrintTransitiveClosure(Nodes *nodes, Edges *edges) {
-  ABSL_RAW_LOG(INFO, "Transitive closure");
+  LOG(INFO) << "Transitive closure";
   for (int a : *nodes) {
     for (int b : *nodes) {
       std::unordered_set<int> seen;
       if (IsReachable(edges, a, b, &seen)) {
-        ABSL_RAW_LOG(INFO, "%d %d", a, b);
+        LOG(INFO) << a << " " << b;
       }
     }
   }
-  ABSL_RAW_LOG(INFO, "---");
+  LOG(INFO) << "---";
 }
 
 static void PrintGCTransitiveClosure(Nodes *nodes, const IdMap &id,
                                      GraphCycles *gc) {
-  ABSL_RAW_LOG(INFO, "GC Transitive closure");
+  LOG(INFO) << "GC Transitive closure";
   for (int a : *nodes) {
     for (int b : *nodes) {
       if (gc->IsReachable(Get(id, a), Get(id, b))) {
-        ABSL_RAW_LOG(INFO, "%d %d", a, b);
+        LOG(INFO) << a << " " << b;
       }
     }
   }
-  ABSL_RAW_LOG(INFO, "---");
+  LOG(INFO) << "---";
 }
 
 static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
@@ -125,9 +126,8 @@
         PrintGCEdges(nodes, id, gc);
         PrintTransitiveClosure(nodes, edges);
         PrintGCTransitiveClosure(nodes, id, gc);
-        ABSL_RAW_LOG(FATAL, "gc_reachable %s reachable %s a %d b %d",
-                     gc_reachable ? "true" : "false",
-                     reachable ? "true" : "false", a, b);
+        LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
+                   << reachable << " a " << a << " b " << b;
       }
     }
   }
@@ -142,7 +142,7 @@
     if (!gc->HasEdge(Get(id, a), Get(id, b))) {
       PrintEdges(edges);
       PrintGCEdges(nodes, id, gc);
-      ABSL_RAW_LOG(FATAL, "!gc->HasEdge(%d, %d)", a, b);
+      LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
     }
   }
   for (const auto &a : *nodes) {
@@ -155,13 +155,12 @@
   if (count != edges->size()) {
     PrintEdges(edges);
     PrintGCEdges(nodes, id, gc);
-    ABSL_RAW_LOG(FATAL, "edges->size() %zu  count %d", edges->size(), count);
+    LOG(FATAL) << "edges->size() " << edges->size() << "  count " << count;
   }
 }
 
 static void CheckInvariants(const GraphCycles &gc) {
-  if (ABSL_PREDICT_FALSE(!gc.CheckInvariants()))
-    ABSL_RAW_LOG(FATAL, "CheckInvariants");
+  CHECK(gc.CheckInvariants()) << "CheckInvariants";
 }
 
 // Returns the index of a randomly chosen node in *nodes.
@@ -309,7 +308,7 @@
       break;
 
     default:
-      ABSL_RAW_LOG(FATAL, "op %d", op);
+      LOG(FATAL) << "op " << op;
     }
 
     // Very rarely, test graph expansion by adding then removing many nodes.
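The test changes above trade printf-style raw logging for the streaming absl/log macros; streaming deduces formatting from the operand types and so avoids the format-specifier bookkeeping (PRIu32/PRId32 via <cinttypes>) that the graphcycles.cc change had to add for ABSL_RAW_LOG. The two styles side by side, in an illustrative snippet:

// Illustrative comparison of the two logging styles used in this patch.
#include <cinttypes>
#include <cstdint>

#include "absl/base/internal/raw_logging.h"
#include "absl/log/check.h"
#include "absl/log/log.h"

void Report(uint32_t node, int32_t rank, bool ok) {
  // Raw logging: printf-style, needs exact format specifiers.
  ABSL_RAW_LOG(INFO, "node %" PRIu32 " rank %" PRId32, node, rank);
  // absl/log: streaming, formatting deduced from the operand types.
  LOG(INFO) << "node " << node << " rank " << rank;
  CHECK(ok) << "invariant failed for node " << node;  // Fatal if !ok.
}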
diff --git a/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc b/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc
new file mode 100644
index 0000000..48ea628
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc
@@ -0,0 +1,225 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <algorithm>
+#include <chrono>  // NOLINT(build/c++11)
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr uint64_t KernelTimeout::kNoTimeout;
+constexpr int64_t KernelTimeout::kMaxNanos;
+#endif
+
+int64_t KernelTimeout::SteadyClockNow() {
+  if (!SupportsSteadyClock()) {
+    return absl::GetCurrentTimeNanos();
+  }
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+KernelTimeout::KernelTimeout(absl::Time t) {
+  // `absl::InfiniteFuture()` is a common "no timeout" value and cheaper to
+  // compare than convert.
+  if (t == absl::InfiniteFuture()) {
+    rep_ = kNoTimeout;
+    return;
+  }
+
+  int64_t unix_nanos = absl::ToUnixNanos(t);
+
+  // A timeout that lands before the unix epoch is converted to 0.
+  // In theory implementations should expire these timeouts immediately.
+  if (unix_nanos < 0) {
+    unix_nanos = 0;
+  }
+
+  // Values greater than or equal to kMaxNanos are converted to infinite.
+  if (unix_nanos >= kMaxNanos) {
+    rep_ = kNoTimeout;
+    return;
+  }
+
+  rep_ = static_cast<uint64_t>(unix_nanos) << 1;
+}
+
+KernelTimeout::KernelTimeout(absl::Duration d) {
+  // `absl::InfiniteDuration()` is a common "no timeout" value and cheaper to
+  // compare than convert.
+  if (d == absl::InfiniteDuration()) {
+    rep_ = kNoTimeout;
+    return;
+  }
+
+  int64_t nanos = absl::ToInt64Nanoseconds(d);
+
+  // Negative durations are normalized to 0.
+  // In theory implementations should expire these timeouts immediately.
+  if (nanos < 0) {
+    nanos = 0;
+  }
+
+  int64_t now = SteadyClockNow();
+  if (nanos > kMaxNanos - now) {
+    // Durations that would be greater than kMaxNanos are converted to infinite.
+    rep_ = kNoTimeout;
+    return;
+  }
+
+  nanos += now;
+  rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
+}
+
+int64_t KernelTimeout::MakeAbsNanos() const {
+  if (!has_timeout()) {
+    return kMaxNanos;
+  }
+
+  int64_t nanos = RawAbsNanos();
+
+  if (is_relative_timeout()) {
+    // We need to change epochs, because the relative timeout might be
+    // represented by an absolute timestamp from another clock.
+    nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
+    int64_t now = absl::GetCurrentTimeNanos();
+    if (nanos > kMaxNanos - now) {
+      // Overflow.
+      nanos = kMaxNanos;
+    } else {
+      nanos += now;
+    }
+  } else if (nanos == 0) {
+    // Some callers have assumed that 0 means no timeout, so instead we return a
+    // time of 1 nanosecond after the epoch.
+    nanos = 1;
+  }
+
+  return nanos;
+}
+
+int64_t KernelTimeout::InNanosecondsFromNow() const {
+  if (!has_timeout()) {
+    return kMaxNanos;
+  }
+
+  int64_t nanos = RawAbsNanos();
+  if (is_absolute_timeout()) {
+    return std::max<int64_t>(nanos - absl::GetCurrentTimeNanos(), 0);
+  }
+  return std::max<int64_t>(nanos - SteadyClockNow(), 0);
+}
+
+struct timespec KernelTimeout::MakeAbsTimespec() const {
+  return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
+}
+
+struct timespec KernelTimeout::MakeRelativeTimespec() const {
+  return absl::ToTimespec(absl::Nanoseconds(InNanosecondsFromNow()));
+}
+
+#ifndef _WIN32
+struct timespec KernelTimeout::MakeClockAbsoluteTimespec(clockid_t c) const {
+  if (!has_timeout()) {
+    return absl::ToTimespec(absl::Nanoseconds(kMaxNanos));
+  }
+
+  int64_t nanos = RawAbsNanos();
+  if (is_absolute_timeout()) {
+    nanos -= absl::GetCurrentTimeNanos();
+  } else {
+    nanos -= SteadyClockNow();
+  }
+
+  struct timespec now;
+  ABSL_RAW_CHECK(clock_gettime(c, &now) == 0, "clock_gettime() failed");
+  absl::Duration from_clock_epoch =
+      absl::DurationFromTimespec(now) + absl::Nanoseconds(nanos);
+  if (from_clock_epoch <= absl::ZeroDuration()) {
+    // Some callers have assumed that 0 means no timeout, so instead we return a
+    // time of 1 nanosecond after the epoch. For safety we also do not return
+    // negative values.
+    return absl::ToTimespec(absl::Nanoseconds(1));
+  }
+  return absl::ToTimespec(from_clock_epoch);
+}
+#endif
+
+KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
+  constexpr DWord kInfinite = std::numeric_limits<DWord>::max();
+
+  if (!has_timeout()) {
+    return kInfinite;
+  }
+
+  constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
+  constexpr uint64_t kMaxValueNanos =
+      std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
+
+  uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
+  if (ns_from_now >= kMaxValueNanos) {
+    // Rounding up would overflow.
+    return kInfinite;
+  }
+  // Convert to milliseconds, always rounding up.
+  uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
+  if (ms_from_now > kInfinite) {
+    return kInfinite;
+  }
+  return static_cast<DWord>(ms_from_now);
+}
+
+std::chrono::time_point<std::chrono::system_clock>
+KernelTimeout::ToChronoTimePoint() const {
+  if (!has_timeout()) {
+    return std::chrono::time_point<std::chrono::system_clock>::max();
+  }
+
+  // The cast to std::microseconds is because (on some platforms) the
+  // std::ratio used by std::chrono::steady_clock doesn't convert to
+  // std::nanoseconds, so it doesn't compile.
+  auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
+      std::chrono::nanoseconds(MakeAbsNanos()));
+  return std::chrono::system_clock::from_time_t(0) + micros;
+}
+
+std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
+  if (!has_timeout()) {
+    return std::chrono::nanoseconds::max();
+  }
+  return std::chrono::nanoseconds(InNanosecondsFromNow());
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
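The new KernelTimeout packs everything into one 64-bit word: the low bit records which clock's epoch the high 63 bits of nanoseconds are measured from, and an all-ones word means no timeout. A standalone encode/decode sketch of just that packing (free functions here are illustrative, not the class's API):

// Illustration of the rep_ packing used by KernelTimeout:
//   all-ones          -> no timeout
//   (nanos << 1) | 0  -> absolute, nanoseconds after the unix epoch
//   (nanos << 1) | 1  -> relative, stored as an absolute steady-clock deadline
#include <cstdint>
#include <limits>

constexpr uint64_t kNoTimeout = std::numeric_limits<uint64_t>::max();

constexpr uint64_t EncodeAbsolute(int64_t unix_nanos) {
  return static_cast<uint64_t>(unix_nanos) << 1;
}
constexpr uint64_t EncodeRelative(int64_t steady_deadline_nanos) {
  return (static_cast<uint64_t>(steady_deadline_nanos) << 1) | uint64_t{1};
}
constexpr bool HasTimeout(uint64_t rep) { return rep != kNoTimeout; }
constexpr bool IsRelative(uint64_t rep) { return (rep & 1) == 1; }
constexpr int64_t RawAbsNanos(uint64_t rep) {
  return static_cast<int64_t>(rep >> 1);
}

static_assert(RawAbsNanos(EncodeAbsolute(42)) == 42, "nanos round-trip");
static_assert(IsRelative(EncodeRelative(42)) && !IsRelative(EncodeAbsolute(42)),
              "low bit distinguishes the two epochs");

Storing even relative timeouts as absolute deadlines is what lets a wait be restarted after a spurious wakeup without re-measuring elapsed time.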
diff --git a/abseil-cpp/absl/synchronization/internal/kernel_timeout.h b/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
index 1084e1e..06404a7 100644
--- a/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
+++ b/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
@@ -11,24 +11,21 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with absl::Time. Suitable for in-register
-// parameter-passing (e.g. syscalls.)
-// Constructible from a absl::Time (for a timeout to be respected) or {}
-// (for "no timeout".)
-// This is a private low-level API for use by a handful of low-level
-// components that are friends of this class. Higher-level components
-// should build APIs based on absl::Time and absl::Duration.
 
 #ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
 #define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
 
-#include <time.h>
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
 #include <algorithm>
+#include <chrono>  // NOLINT(build/c++11)
+#include <cstdint>
+#include <ctime>
 #include <limits>
 
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/time/clock.h"
 #include "absl/time/time.h"
@@ -37,56 +34,73 @@
 ABSL_NAMESPACE_BEGIN
 namespace synchronization_internal {
 
-class Futex;
-class Waiter;
-
+// An optional timeout, with nanosecond granularity.
+//
+// This is a private low-level API for use by a handful of low-level
+// components. Higher-level components should build APIs based on
+// absl::Time and absl::Duration.
 class KernelTimeout {
  public:
-  // A timeout that should expire at <t>.  Any value, in the full
-  // InfinitePast() to InfiniteFuture() range, is valid here and will be
-  // respected.
-  explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
-  // No timeout.
-  KernelTimeout() : ns_(0) {}
+  // Construct an absolute timeout that should expire at `t`.
+  explicit KernelTimeout(absl::Time t);
 
-  // A more explicit factory for those who prefer it.  Equivalent to {}.
-  static KernelTimeout Never() { return {}; }
+  // Construct a relative timeout that should expire after `d`.
+  explicit KernelTimeout(absl::Duration d);
 
-  // We explicitly do not support other custom formats: timespec, int64_t nanos.
-  // Unify on this and absl::Time, please.
+  // Infinite timeout.
+  constexpr KernelTimeout() : rep_(kNoTimeout) {}
 
-  bool has_timeout() const { return ns_ != 0; }
+  // A more explicit factory for those who prefer it.
+  // Equivalent to `KernelTimeout()`.
+  static constexpr KernelTimeout Never() { return KernelTimeout(); }
 
-  // Convert to parameter for sem_timedwait/futex/similar.  Only for approved
-  // users.  Do not call if !has_timeout.
-  struct timespec MakeAbsTimespec();
+  // Returns true if there is a timeout that will eventually expire.
+  // Returns false if the timeout is infinite.
+  bool has_timeout() const { return rep_ != kNoTimeout; }
 
- private:
-  // internal rep, not user visible: ns after unix epoch.
-  // zero = no timeout.
-  // Negative we treat as an unlikely (and certainly expired!) but valid
-  // timeout.
-  int64_t ns_;
+  // If `has_timeout()` is true, returns true if the timeout was provided as an
+  // `absl::Time`. The return value is undefined if `has_timeout()` is false
+  // because all indefinite timeouts are equivalent.
+  bool is_absolute_timeout() const { return (rep_ & 1) == 0; }
 
-  static int64_t MakeNs(absl::Time t) {
-    // optimization--InfiniteFuture is common "no timeout" value
-    // and cheaper to compare than convert.
-    if (t == absl::InfiniteFuture()) return 0;
-    int64_t x = ToUnixNanos(t);
+  // If `has_timeout()` is true, returns true if the timeout was provided as an
+  // `absl::Duration`. The return value is undefined if `has_timeout()` is false
+  // because all indefinite timeouts are equivalent.
+  bool is_relative_timeout() const { return (rep_ & 1) == 1; }
 
-    // A timeout that lands exactly on the epoch (x=0) needs to be respected,
-    // so we alter it unnoticably to 1.  Negative timeouts are in
-    // theory supported, but handled poorly by the kernel (long
-    // delays) so push them forward too; since all such times have
-    // already passed, it's indistinguishable.
-    if (x <= 0) x = 1;
-    // A time larger than what can be represented to the kernel is treated
-    // as no timeout.
-    if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
-    return x;
-  }
+  // Convert to `struct timespec` for interfaces that expect an absolute
+  // timeout. If !has_timeout() or is_relative_timeout(), attempts to convert to
+  // a reasonable absolute timeout, but callers should test has_timeout() and
+  // is_relative_timeout() and prefer to use a more appropriate interface.
+  struct timespec MakeAbsTimespec() const;
 
-#ifdef _WIN32
+  // Convert to `struct timespec` for interfaces that expect a relative
+  // timeout. If !has_timeout() or is_absolute_timeout(), attempts to convert to
+  // a reasonable relative timeout, but callers should test has_timeout() and
+  // is_absolute_timeout() and prefer to use a more appropriate interface. Since
+  // the return value is a relative duration, it should be recomputed by calling
+  // this method in the case of a spurious wakeup.
+  struct timespec MakeRelativeTimespec() const;
+
+#ifndef _WIN32
+  // Convert to `struct timespec` for interfaces that expect an absolute timeout
+  // on a specific clock `c`. This is similar to `MakeAbsTimespec()`, but
+  // callers usually want to use this method with `CLOCK_MONOTONIC` when
+  // relative timeouts are requested, and when the appropriate interface expects
+  // an absolute timeout relative to a specific clock (for example,
+  // pthread_cond_clockwait() or sem_clockwait()). If !has_timeout(), attempts
+  // to convert to a reasonable absolute timeout, but callers should test
+  // has_timeout() and prefer to use a more appropriate interface.
+  struct timespec MakeClockAbsoluteTimespec(clockid_t c) const;
+#endif
+
+  // Convert to unix epoch nanos for interfaces that expect an absolute timeout
+  // in nanoseconds. If !has_timeout() or is_relative_timeout(), attempts to
+  // convert to a reasonable absolute timeout, but callers should test
+  // has_timeout() and is_relative_timeout() and prefer to use a more
+  // appropriate interface.
+  int64_t MakeAbsNanos() const;
+
   // Converts to milliseconds from now, or INFINITE when
   // !has_timeout(). For use by SleepConditionVariableSRW on
   // Windows. Callers should recognize that the return value is a
@@ -96,58 +110,67 @@
   // so we define our own DWORD and INFINITE instead of getting them from
   // <intsafe.h> and <WinBase.h>.
   typedef unsigned long DWord;  // NOLINT
-  DWord InMillisecondsFromNow() const {
-    constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
-    if (!has_timeout()) {
-      return kInfinite;
-    }
-    // The use of absl::Now() to convert from absolute time to
-    // relative time means that absl::Now() cannot use anything that
-    // depends on KernelTimeout (for example, Mutex) on Windows.
-    int64_t now = ToUnixNanos(absl::Now());
-    if (ns_ >= now) {
-      // Round up so that Now() + ms_from_now >= ns_.
-      constexpr uint64_t max_nanos =
-          (std::numeric_limits<int64_t>::max)() - 999999u;
-      uint64_t ms_from_now =
-          (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
-      if (ms_from_now > kInfinite) {
-        return kInfinite;
-      }
-      return static_cast<DWord>(ms_from_now);
-    }
-    return 0;
-  }
-#endif
+  DWord InMillisecondsFromNow() const;
 
-  friend class Futex;
-  friend class Waiter;
+  // Convert to std::chrono::time_point for interfaces that expect an absolute
+  // timeout, like std::condition_variable::wait_until(). If !has_timeout() or
+  // is_relative_timeout(), attempts to convert to a reasonable absolute
+  // timeout, but callers should test has_timeout() and is_relative_timeout()
+  // and prefer to use a more appropriate interface.
+  std::chrono::time_point<std::chrono::system_clock> ToChronoTimePoint() const;
+
+  // Convert to std::chrono::time_point for interfaces that expect a relative
+  // timeout, like std::condition_variable::wait_for(). If !has_timeout() or
+  // is_absolute_timeout(), attempts to convert to a reasonable relative
+  // timeout, but callers should test has_timeout() and is_absolute_timeout()
+  // and prefer to use a more appropriate interface. Since the return value is a
+  // relative duration, it should be recomputed by calling this method in the
+  // case of a spurious wakeup.
+  std::chrono::nanoseconds ToChronoDuration() const;
+
+  // Returns true if steady (aka monotonic) clocks are supported by the system.
+  // This method exists because go/btm requires synchronized clocks, and
+  // thus requires we use the system (aka walltime) clock.
+  static constexpr bool SupportsSteadyClock() { return true; }
+
+ private:
+  // Returns the current time, expressed as a count of nanoseconds since the
+  // epoch used by an arbitrary clock. The implementation tries to use a steady
+  // (monotonic) clock if one is available.
+  static int64_t SteadyClockNow();
+
+  // Internal representation.
+  //   - If the value is kNoTimeout, then the timeout is infinite, and
+  //     has_timeout() will return false.
+  //   - If the low bit is 0, then the high 63 bits is the number of nanoseconds
+  //     after the unix epoch.
+  //   - If the low bit is 1, then the high 63 bits is the number of nanoseconds
+  //     after the epoch used by SteadyClockNow().
+  //
+  // In all cases the time is stored as an absolute time, the only difference is
+  // the clock epoch. The use of absolute times is important since in the case
+  // of a relative timeout with a spurious wakeup, the program would have to
+  // restart the wait, and thus needs a way of recomputing the remaining time.
+  uint64_t rep_;
+
+  // Returns the number of nanoseconds stored in the internal representation.
+  // When combined with the clock epoch indicated by the low bit (which is
+  // accessed through is_absolute_timeout() and is_relative_timeout()), the
+  // return value is used to compute when the timeout should occur.
+  int64_t RawAbsNanos() const { return static_cast<int64_t>(rep_ >> 1); }
+
+  // Converts to nanoseconds from now. Since the return value is a relative
+  // duration, it should be recomputed by calling this method in the case of a
+  // spurious wakeup.
+  int64_t InNanosecondsFromNow() const;
+
+  // A value that represents no timeout (or an infinite timeout).
+  static constexpr uint64_t kNoTimeout = (std::numeric_limits<uint64_t>::max)();
+
+  // The maximum value that can be stored in the high 63 bits.
+  static constexpr int64_t kMaxNanos = (std::numeric_limits<int64_t>::max)();
 };
 
-inline struct timespec KernelTimeout::MakeAbsTimespec() {
-  int64_t n = ns_;
-  static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
-  if (n == 0) {
-    ABSL_RAW_LOG(
-        ERROR, "Tried to create a timespec from a non-timeout; never do this.");
-    // But we'll try to continue sanely.  no-timeout ~= saturated timeout.
-    n = (std::numeric_limits<int64_t>::max)();
-  }
-
-  // Kernel APIs validate timespecs as being at or after the epoch,
-  // despite the kernel time type being signed.  However, no one can
-  // tell the difference between a timeout at or before the epoch (since
-  // all such timeouts have expired!)
-  if (n < 0) n = 0;
-
-  struct timespec abstime;
-  int64_t seconds = (std::min)(n / kNanosPerSecond,
-                             int64_t{(std::numeric_limits<time_t>::max)()});
-  abstime.tv_sec = static_cast<time_t>(seconds);
-  abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
-  return abstime;
-}
-
 }  // namespace synchronization_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
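As a usage sketch only (KernelTimeout is a private internal API, and pthread_cond_clockwait() requires glibc 2.30+ or an equivalent POSIX implementation), an internal waiter might consume the new conversions by picking the form that matches its sleeping primitive:

// Hypothetical internal waiter built on pthread condition variables.
// Caller must hold *mu. Not a public Abseil interface.
#include <pthread.h>

#include <cerrno>
#include <ctime>

#include "absl/synchronization/internal/kernel_timeout.h"

using absl::synchronization_internal::KernelTimeout;

// Returns true if `pred` became true before the timeout expired.
bool WaitWithTimeout(pthread_cond_t* cv, pthread_mutex_t* mu, bool (*pred)(),
                     KernelTimeout t) {
  while (!pred()) {
    if (!t.has_timeout()) {
      pthread_cond_wait(cv, mu);
      continue;
    }
    // MakeClockAbsoluteTimespec() yields an effectively fixed CLOCK_MONOTONIC
    // deadline for both absolute and relative timeouts, so recomputing it
    // after a spurious wakeup is harmless.
    struct timespec deadline = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
    if (pthread_cond_clockwait(cv, mu, CLOCK_MONOTONIC, &deadline) ==
        ETIMEDOUT) {
      return pred();
    }
  }
  return true;
}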
diff --git a/abseil-cpp/absl/synchronization/internal/kernel_timeout_test.cc b/abseil-cpp/absl/synchronization/internal/kernel_timeout_test.cc
new file mode 100644
index 0000000..92ed269
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/kernel_timeout_test.cc
@@ -0,0 +1,394 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#include <ctime>
+#include <chrono>  // NOLINT(build/c++11)
+#include <limits>
+
+#include "absl/base/config.h"
+#include "absl/random/random.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// Test go/btm support by randomizing the value of clock_gettime() for
+// CLOCK_MONOTONIC. This works by overriding a weak symbol in glibc.
+// We should be resistant to this randomization when !SupportsSteadyClock().
+#if defined(__GOOGLE_GRTE_VERSION__) &&      \
+    !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    !defined(ABSL_HAVE_MEMORY_SANITIZER) &&  \
+    !defined(ABSL_HAVE_THREAD_SANITIZER)
+extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
+
+extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
+  if (c == CLOCK_MONOTONIC &&
+      !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
+    absl::SharedBitGen gen;
+    ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
+    ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
+    return 0;
+  }
+  return __clock_gettime(c, ts);
+}
+#endif
+
+namespace {
+
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_MEMORY_SANITIZER) ||  \
+    defined(ABSL_HAVE_THREAD_SANITIZER) || \
+    defined(__ANDROID__) || \
+    defined(_WIN32) || defined(_WIN64)
+constexpr absl::Duration kTimingBound = absl::Milliseconds(5);
+#else
+constexpr absl::Duration kTimingBound = absl::Microseconds(250);
+#endif
+
+using absl::synchronization_internal::KernelTimeout;
+
+TEST(KernelTimeout, FiniteTimes) {
+  constexpr absl::Duration kDurationsToTest[] = {
+    absl::ZeroDuration(),
+    absl::Nanoseconds(1),
+    absl::Microseconds(1),
+    absl::Milliseconds(1),
+    absl::Seconds(1),
+    absl::Minutes(1),
+    absl::Hours(1),
+    absl::Hours(1000),
+    -absl::Nanoseconds(1),
+    -absl::Microseconds(1),
+    -absl::Milliseconds(1),
+    -absl::Seconds(1),
+    -absl::Minutes(1),
+    -absl::Hours(1),
+    -absl::Hours(1000),
+  };
+
+  for (auto duration : kDurationsToTest) {
+    const absl::Time now = absl::Now();
+    const absl::Time when = now + duration;
+    SCOPED_TRACE(duration);
+    KernelTimeout t(when);
+    EXPECT_TRUE(t.has_timeout());
+    EXPECT_TRUE(t.is_absolute_timeout());
+    EXPECT_FALSE(t.is_relative_timeout());
+    EXPECT_EQ(absl::TimeFromTimespec(t.MakeAbsTimespec()), when);
+#ifndef _WIN32
+    EXPECT_LE(
+        absl::AbsDuration(absl::Now() + duration -
+                          absl::TimeFromTimespec(
+                              t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
+        absl::Milliseconds(10));
+#endif
+    EXPECT_LE(
+        absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
+                          std::max(duration, absl::ZeroDuration())),
+        kTimingBound);
+    EXPECT_EQ(absl::FromUnixNanos(t.MakeAbsNanos()), when);
+    EXPECT_LE(absl::AbsDuration(absl::Milliseconds(t.InMillisecondsFromNow()) -
+                                std::max(duration, absl::ZeroDuration())),
+              absl::Milliseconds(5));
+    EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoTimePoint()) - when),
+              absl::Microseconds(1));
+    EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) -
+                                std::max(duration, absl::ZeroDuration())),
+              kTimingBound);
+  }
+}
+
+TEST(KernelTimeout, InfiniteFuture) {
+  KernelTimeout t(absl::InfiniteFuture());
+  EXPECT_FALSE(t.has_timeout());
+  // Callers are expected to check has_timeout() instead of using the methods
+  // below, but we do try to do something reasonable if they don't. We may not
+  // be able to round-trip back to absl::InfiniteDuration() or
+  // absl::InfiniteFuture(), but we should return a very large value.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_EQ(t.InMillisecondsFromNow(),
+            std::numeric_limits<KernelTimeout::DWord>::max());
+  EXPECT_EQ(t.ToChronoTimePoint(),
+            std::chrono::time_point<std::chrono::system_clock>::max());
+  EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, DefaultConstructor) {
+  // The default constructor is equivalent to absl::InfiniteFuture().
+  KernelTimeout t;
+  EXPECT_FALSE(t.has_timeout());
+  // Callers are expected to check has_timeout() instead of using the methods
+  // below, but we do try to do something reasonable if they don't. We may not
+  // be able to round-trip back to absl::InfiniteDuration() or
+  // absl::InfiniteFuture(), but we should return a very large value.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_EQ(t.InMillisecondsFromNow(),
+            std::numeric_limits<KernelTimeout::DWord>::max());
+  EXPECT_EQ(t.ToChronoTimePoint(),
+            std::chrono::time_point<std::chrono::system_clock>::max());
+  EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, TimeMaxNanos) {
+  // Time >= kMaxNanos should behave as no timeout.
+  KernelTimeout t(absl::FromUnixNanos(std::numeric_limits<int64_t>::max()));
+  EXPECT_FALSE(t.has_timeout());
+  // Callers are expected to check has_timeout() instead of using the methods
+  // below, but we do try to do something reasonable if they don't. We may not
+  // be able to round-trip back to absl::InfiniteDuration() or
+  // absl::InfiniteFuture(), but we should return a very large value.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_EQ(t.InMillisecondsFromNow(),
+            std::numeric_limits<KernelTimeout::DWord>::max());
+  EXPECT_EQ(t.ToChronoTimePoint(),
+            std::chrono::time_point<std::chrono::system_clock>::max());
+  EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, Never) {
+  // KernelTimeout::Never() is equivalent to absl::InfiniteFuture().
+  KernelTimeout t = KernelTimeout::Never();
+  EXPECT_FALSE(t.has_timeout());
+  // Callers are expected to check has_timeout() instead of using the methods
+  // below, but we do try to do something reasonable if they don't. We may not
+  // be able to round-trip back to absl::InfiniteDuration() or
+  // absl::InfiniteFuture(), but we should return a very large value.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_EQ(t.InMillisecondsFromNow(),
+            std::numeric_limits<KernelTimeout::DWord>::max());
+  EXPECT_EQ(t.ToChronoTimePoint(),
+            std::chrono::time_point<std::chrono::system_clock>::max());
+  EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, InfinitePast) {
+  KernelTimeout t(absl::InfinitePast());
+  EXPECT_TRUE(t.has_timeout());
+  EXPECT_TRUE(t.is_absolute_timeout());
+  EXPECT_FALSE(t.is_relative_timeout());
+  EXPECT_LE(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::FromUnixNanos(1));
+#ifndef _WIN32
+  EXPECT_LE(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::FromUnixSeconds(1));
+#endif
+  EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::ZeroDuration());
+  EXPECT_LE(absl::FromUnixNanos(t.MakeAbsNanos()), absl::FromUnixNanos(1));
+  EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+  EXPECT_LT(t.ToChronoTimePoint(), std::chrono::system_clock::from_time_t(0) +
+                                       std::chrono::seconds(1));
+  EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
+}
+
+TEST(KernelTimeout, FiniteDurations) {
+  constexpr absl::Duration kDurationsToTest[] = {
+    absl::ZeroDuration(),
+    absl::Nanoseconds(1),
+    absl::Microseconds(1),
+    absl::Milliseconds(1),
+    absl::Seconds(1),
+    absl::Minutes(1),
+    absl::Hours(1),
+    absl::Hours(1000),
+  };
+
+  for (auto duration : kDurationsToTest) {
+    SCOPED_TRACE(duration);
+    KernelTimeout t(duration);
+    EXPECT_TRUE(t.has_timeout());
+    EXPECT_FALSE(t.is_absolute_timeout());
+    EXPECT_TRUE(t.is_relative_timeout());
+    EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+                                absl::TimeFromTimespec(t.MakeAbsTimespec())),
+              absl::Milliseconds(5));
+#ifndef _WIN32
+    EXPECT_LE(
+        absl::AbsDuration(absl::Now() + duration -
+                          absl::TimeFromTimespec(
+                              t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
+        absl::Milliseconds(5));
+#endif
+    EXPECT_LE(
+        absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
+                          duration),
+        kTimingBound);
+    EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+                                absl::FromUnixNanos(t.MakeAbsNanos())),
+              absl::Milliseconds(5));
+    EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
+              absl::Milliseconds(5));
+    EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+                                absl::FromChrono(t.ToChronoTimePoint())),
+              kTimingBound);
+    EXPECT_LE(
+        absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) - duration),
+        kTimingBound);
+  }
+}
+
+TEST(KernelTimeout, NegativeDurations) {
+  constexpr absl::Duration kDurationsToTest[] = {
+    -absl::ZeroDuration(),
+    -absl::Nanoseconds(1),
+    -absl::Microseconds(1),
+    -absl::Milliseconds(1),
+    -absl::Seconds(1),
+    -absl::Minutes(1),
+    -absl::Hours(1),
+    -absl::Hours(1000),
+    -absl::InfiniteDuration(),
+  };
+
+  for (auto duration : kDurationsToTest) {
+    // Negative durations should all be converted to zero durations or "now".
+    SCOPED_TRACE(duration);
+    KernelTimeout t(duration);
+    EXPECT_TRUE(t.has_timeout());
+    EXPECT_FALSE(t.is_absolute_timeout());
+    EXPECT_TRUE(t.is_relative_timeout());
+    EXPECT_LE(absl::AbsDuration(absl::Now() -
+                                absl::TimeFromTimespec(t.MakeAbsTimespec())),
+              absl::Milliseconds(5));
+#ifndef _WIN32
+    EXPECT_LE(absl::AbsDuration(absl::Now() - absl::TimeFromTimespec(
+                                                  t.MakeClockAbsoluteTimespec(
+                                                      CLOCK_REALTIME))),
+              absl::Milliseconds(5));
+#endif
+    EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+              absl::ZeroDuration());
+    EXPECT_LE(
+        absl::AbsDuration(absl::Now() - absl::FromUnixNanos(t.MakeAbsNanos())),
+        absl::Milliseconds(5));
+    EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+    EXPECT_LE(absl::AbsDuration(absl::Now() -
+                                absl::FromChrono(t.ToChronoTimePoint())),
+              absl::Milliseconds(5));
+    EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
+  }
+}
+
+TEST(KernelTimeout, InfiniteDuration) {
+  KernelTimeout t(absl::InfiniteDuration());
+  EXPECT_FALSE(t.has_timeout());
+  // Callers are expected to check has_timeout() instead of using the methods
+  // below, but we do try to do something reasonable if they don't. We may not
+  // be able to round-trip back to absl::InfiniteDuration() or
+  // absl::InfiniteFuture(), but we should return a very large value.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_EQ(t.InMillisecondsFromNow(),
+            std::numeric_limits<KernelTimeout::DWord>::max());
+  EXPECT_EQ(t.ToChronoTimePoint(),
+            std::chrono::time_point<std::chrono::system_clock>::max());
+  EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, DurationMaxNanos) {
+  // Duration >= kMaxNanos should behave as no timeout.
+  KernelTimeout t(absl::Nanoseconds(std::numeric_limits<int64_t>::max()));
+  EXPECT_FALSE(t.has_timeout());
+  // Callers are expected to check has_timeout() instead of using the methods
+  // below, but we do try to do something reasonable if they don't. We may not
+  // be able to round-trip back to absl::InfiniteDuration() or
+  // absl::InfiniteFuture(), but we should return a very large value.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_EQ(t.InMillisecondsFromNow(),
+            std::numeric_limits<KernelTimeout::DWord>::max());
+  EXPECT_EQ(t.ToChronoTimePoint(),
+            std::chrono::time_point<std::chrono::system_clock>::max());
+  EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, OverflowNanos) {
+  // Test what happens when KernelTimeout is constructed with an absl::Duration
+  // that would overflow now_nanos + duration.
+  int64_t now_nanos = absl::ToUnixNanos(absl::Now());
+  int64_t limit = std::numeric_limits<int64_t>::max() - now_nanos;
+  absl::Duration duration = absl::Nanoseconds(limit) + absl::Seconds(1);
+  KernelTimeout t(duration);
+  // Timeouts should still be far in the future.
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+            absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+  EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+            absl::Now() + absl::Hours(100000));
+#endif
+  EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+            absl::Hours(100000));
+  EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+            absl::Now() + absl::Hours(100000));
+  EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
+            absl::Milliseconds(5));
+  EXPECT_GT(t.ToChronoTimePoint(),
+            std::chrono::system_clock::now() + std::chrono::hours(100000));
+  EXPECT_GT(t.ToChronoDuration(), std::chrono::hours(100000));
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc b/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc
deleted file mode 100644
index d83bc8a..0000000
--- a/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc
+++ /dev/null
@@ -1,249 +0,0 @@
-// Do not include.  This is an implementation detail of base/mutex.h.
-//
-// Declares three classes:
-//
-// base::internal::MutexImpl - implementation helper for Mutex
-// base::internal::CondVarImpl - implementation helper for CondVar
-// base::internal::SynchronizationStorage<T> - implementation helper for
-//                                             Mutex, CondVar
-
-#include <type_traits>
-
-#if defined(_WIN32)
-#include <condition_variable>
-#include <mutex>
-#else
-#include <pthread.h>
-#endif
-
-#include "absl/base/call_once.h"
-#include "absl/time/time.h"
-
-// Declare that Mutex::ReaderLock is actually Lock().  Intended primarily
-// for tests, and even then as a last resort.
-#ifdef ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
-#error ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE cannot be directly set
-#else
-#define ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE 1
-#endif
-
-// Declare that Mutex::EnableInvariantDebugging is not implemented.
-// Intended primarily for tests, and even then as a last resort.
-#ifdef ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED
-#error ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED cannot be directly set
-#else
-#define ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED 1
-#endif
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-class Condition;
-
-namespace synchronization_internal {
-
-class MutexImpl;
-
-// Do not use this implementation detail of CondVar. Provides most of the
-// implementation, but should not be placed directly in static storage
-// because it will not linker initialize properly. See
-// SynchronizationStorage<T> below for what we mean by linker
-// initialization.
-class CondVarImpl {
- public:
-  CondVarImpl();
-  CondVarImpl(const CondVarImpl&) = delete;
-  CondVarImpl& operator=(const CondVarImpl&) = delete;
-  ~CondVarImpl();
-
-  void Signal();
-  void SignalAll();
-  void Wait(MutexImpl* mutex);
-  bool WaitWithDeadline(MutexImpl* mutex, absl::Time deadline);
-
- private:
-#if defined(_WIN32)
-  std::condition_variable_any std_cv_;
-#else
-  pthread_cond_t pthread_cv_;
-#endif
-};
-
-// Do not use this implementation detail of Mutex. Provides most of the
-// implementation, but should not be placed directly in static storage
-// because it will not linker initialize properly. See
-// SynchronizationStorage<T> below for what we mean by linker
-// initialization.
-class MutexImpl {
- public:
-  MutexImpl();
-  MutexImpl(const MutexImpl&) = delete;
-  MutexImpl& operator=(const MutexImpl&) = delete;
-  ~MutexImpl();
-
-  void Lock();
-  bool TryLock();
-  void Unlock();
-  void Await(const Condition& cond);
-  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
-
- private:
-  friend class CondVarImpl;
-
-#if defined(_WIN32)
-  std::mutex std_mutex_;
-#else
-  pthread_mutex_t pthread_mutex_;
-#endif
-
-  // True if the underlying mutex is locked.  If the destructor is entered
-  // while locked_, the underlying mutex is unlocked.  Mutex supports
-  // destruction while locked, but the same is undefined behavior for both
-  // pthread_mutex_t and std::mutex.
-  bool locked_ = false;
-
-  // Signaled before releasing the lock, in support of Await.
-  CondVarImpl released_;
-};
-
-// Do not use this implementation detail of CondVar and Mutex.  A storage
-// space for T that supports a LinkerInitialized constructor. T must
-// have a default constructor, which is called by the first call to
-// get(). T's destructor is never called if the LinkerInitialized
-// constructor is called.
-//
-// Objects constructed with the default constructor are constructed and
-// destructed like any other object, and should never be allocated in
-// static storage.
-//
-// Objects constructed with the LinkerInitialized constructor should
-// always be in static storage. For such objects, calls to get() are always
-// valid, except from signal handlers.
-//
-// Note that this implementation relies on undefined language behavior that
-// is known to hold for the set of supported compilers. An analysis
-// follows.
-//
-// From the C++11 standard:
-//
-// [basic.life] says an object has non-trivial initialization if it is of
-// class type and it is initialized by a constructor other than a trivial
-// default constructor.  (the LinkerInitialized constructor is
-// non-trivial)
-//
-// [basic.life] says the lifetime of an object with a non-trivial
-// constructor begins when the call to the constructor is complete.
-//
-// [basic.life] says the lifetime of an object with non-trivial destructor
-// ends when the call to the destructor begins.
-//
-// [basic.life] p5 specifies undefined behavior when accessing non-static
-// members of an instance outside its
-// lifetime. (SynchronizationStorage::get() accesses non-static members)
-//
-// So, a LinkerInitialized object of SynchronizationStorage uses a
-// non-trivial constructor, which is called at some point during dynamic
-// initialization, and is therefore subject to order of dynamic
-// initialization bugs, where get() is called before the object's
-// constructor is, resulting in undefined behavior.
-//
-// Similarly, a LinkerInitialized SynchronizationStorage object has a
-// non-trivial destructor, and so its lifetime ends at some point during
-// destruction of objects with static storage duration [basic.start.term]
-// p4. There is a window where other exit code could call get() after this
-// occurs, resulting in undefined behavior.
-//
-// Combined, these statements imply that LinkerInitialized instances
-// of SynchronizationStorage<T> rely on undefined behavior.
-//
-// However, in practice, the implementation works on all supported
-// compilers. Specifically, we rely on:
-//
-// a) zero-initialization being sufficient to initialize
-// LinkerInitialized instances for the purposes of calling
-// get(), regardless of when the constructor is called. This is
-// because the once_ flag is correctly zero-initialized to its unset
-// state, so the first call to get() still constructs the T.
-//
-// b) the LinkerInitialized constructor is a NOP, and immaterial
-// even to concurrent calls to get().
-//
-// c) the destructor being a NOP for LinkerInitialized objects
-// (guaranteed by destruct_ being false), and so any concurrent and
-// subsequent calls to get() functioning as if the destructor were not
-// called, by virtue of the instances' storage remaining valid after the
-// destructor runs.
-//
-// d) That a-c apply transitively when SynchronizationStorage<T> is the
-// only member of a class allocated in static storage.
-//
-// Nothing in the language standard guarantees that a-d hold.  In practice,
-// these hold in all supported compilers.
-//
-// Future direction:
-//
-// Ideally, we would simply use std::mutex or a similar class, which when
-// allocated statically would support use immediately after static
-// initialization up until static storage is reclaimed (i.e. the properties
-// we require of all "linker initialized" instances).
-//
-// Regarding construction in static storage, std::mutex is required to
-// provide a constexpr default constructor [thread.mutex.class], which
-// ensures the instance's lifetime begins with static initialization
-// [basic.start.init], and so is immune to any problems caused by the order
-// of dynamic initialization. However, as of this writing Microsoft's
-// Visual Studio does not provide a constexpr constructor for std::mutex.
-// See
-// https://blogs.msdn.microsoft.com/vcblog/2015/06/02/constexpr-complete-for-vs-2015-rtm-c11-compiler-c17-stl/
-//
-// Regarding destruction of instances in static storage, [basic.life] does
-// say an object's lifetime ends when the storage it occupies is released,
-// in the case of a non-trivial destructor. However, std::mutex is not
-// specified to have a trivial destructor.
-//
-// So, we would need a class with a constexpr default constructor and a
-// trivial destructor. Today, we can achieve neither desired property using
-// std::mutex directly.
-template <typename T>
-class SynchronizationStorage {
- public:
-  // Instances allocated on the heap or on the stack should use the default
-  // constructor.
-  SynchronizationStorage()
-      : destruct_(true), once_() {}
-
-  constexpr explicit SynchronizationStorage(absl::ConstInitType)
-      : destruct_(false), once_(), space_{{0}} {}
-
-  SynchronizationStorage(SynchronizationStorage&) = delete;
-  SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
-
-  ~SynchronizationStorage() {
-    if (destruct_) {
-      get()->~T();
-    }
-  }
-
-  // Retrieve the object in storage. This is fast and thread safe, but does
-  // incur the cost of absl::call_once().
-  T* get() {
-    absl::call_once(once_, SynchronizationStorage::Construct, this);
-    return reinterpret_cast<T*>(&space_);
-  }
-
- private:
-  static void Construct(SynchronizationStorage<T>* self) {
-    new (&self->space_) T();
-  }
-
-  // When true, T's destructor is run when this is destructed.
-  const bool destruct_;
-
-  absl::once_flag once_;
-
-  // An aligned space for the T.
-  alignas(T) unsigned char space_[sizeof(T)];
-};
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
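The long comment above is easiest to follow with a usage sketch of the helper it describes. The snippet below is illustrative only; it assumes the internal names declared in this (now deleted) file and is not code from the change itself.

    // Hypothetical caller of the deleted SynchronizationStorage<T> helper.
    // A kConstInit-constructed instance in static storage is "linker
    // initialized": get() is usable even before dynamic initialization has
    // reached this object, because absl::call_once() constructs the T lazily.
    ABSL_CONST_INIT static absl::synchronization_internal::SynchronizationStorage<
        absl::synchronization_internal::MutexImpl>
        mutex_storage(absl::kConstInit);

    void LockFromAnywhere() {
      mutex_storage.get()->Lock();    // First call constructs the MutexImpl.
      mutex_storage.get()->Unlock();
    }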
diff --git a/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc b/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
index 821ca9b..c9b8dc1 100644
--- a/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
+++ b/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
@@ -40,17 +40,6 @@
   return identity->blocked_count_ptr;
 }
 
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
-  new (Waiter::GetWaiter(identity)) Waiter();
-  identity->ticker.store(0, std::memory_order_relaxed);
-  identity->wait_start.store(0, std::memory_order_relaxed);
-  identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
-void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
-  Waiter::GetWaiter(identity)->~Waiter();
-}
-
 void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
   const int ticker =
       identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
@@ -58,7 +47,7 @@
   const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
   if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
     // Wakeup the waiting thread since it is time for it to become idle.
-    Waiter::GetWaiter(identity)->Poke();
+    ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
   }
 }
 
@@ -68,12 +57,23 @@
 
 extern "C" {
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalPerThreadSemPost(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+    absl::base_internal::ThreadIdentity *identity) {
+  new (absl::synchronization_internal::Waiter::GetWaiter(identity))
+      absl::synchronization_internal::Waiter();
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
     absl::base_internal::ThreadIdentity *identity) {
   absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
 }
 
-ABSL_ATTRIBUTE_WEAK bool AbslInternalPerThreadSemWait(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+    absl::base_internal::ThreadIdentity *identity) {
+  absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
+}
+
+ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
     absl::synchronization_internal::KernelTimeout t) {
   bool timeout = false;
   absl::base_internal::ThreadIdentity *identity;
diff --git a/abseil-cpp/absl/synchronization/internal/per_thread_sem.h b/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
index 2228b6e..144ab3c 100644
--- a/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
+++ b/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
@@ -64,11 +64,7 @@
  private:
   // Create the PerThreadSem associated with "identity".  Initializes count=0.
   // REQUIRES: May only be called by ThreadIdentity.
-  static void Init(base_internal::ThreadIdentity* identity);
-
-  // Destroy the PerThreadSem associated with "identity".
-  // REQUIRES: May only be called by ThreadIdentity.
-  static void Destroy(base_internal::ThreadIdentity* identity);
+  static inline void Init(base_internal::ThreadIdentity* identity);
 
   // Increments "identity"'s count.
   static inline void Post(base_internal::ThreadIdentity* identity);
@@ -81,8 +77,7 @@
   // Permitted callers.
   friend class PerThreadSemTest;
   friend class absl::Mutex;
-  friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
-  friend void ReclaimThreadIdentity(void* v);
+  friend void OneTimeInitThreadIdentity(absl::base_internal::ThreadIdentity*);
 };
 
 }  // namespace synchronization_internal
@@ -96,20 +91,29 @@
 // By changing our extension points to be extern "C", we dodge this
 // check.
 extern "C" {
-void AbslInternalPerThreadSemPost(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
     absl::base_internal::ThreadIdentity* identity);
-bool AbslInternalPerThreadSemWait(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+    absl::base_internal::ThreadIdentity* identity);
+bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
     absl::synchronization_internal::KernelTimeout t);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+    absl::base_internal::ThreadIdentity* identity);
 }  // extern "C"
 
+void absl::synchronization_internal::PerThreadSem::Init(
+    absl::base_internal::ThreadIdentity* identity) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity);
+}
+
 void absl::synchronization_internal::PerThreadSem::Post(
     absl::base_internal::ThreadIdentity* identity) {
-  AbslInternalPerThreadSemPost(identity);
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
 }
 
 bool absl::synchronization_internal::PerThreadSem::Wait(
     absl::synchronization_internal::KernelTimeout t) {
-  return AbslInternalPerThreadSemWait(t);
+  return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
 }
 
 #endif  // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
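Every extension point above is now wrapped in ABSL_INTERNAL_C_SYMBOL. A rough sketch of that macro's effect, assuming the definitions in absl/base/config.h and absl/base/options.h (the "lts_20230125" tag below is an illustrative example, not necessarily the tag used by this drop):

    // Sketch only, not the exact upstream definition.
    #if ABSL_OPTION_USE_INLINE_NAMESPACE == 1
    // Append the inline-namespace tag so the extern "C" symbols of two Abseil
    // revisions linked into one binary cannot collide, e.g.
    // AbslInternalPerThreadSemWait becomes AbslInternalPerThreadSemWait_lts_20230125.
    #define ABSL_INTERNAL_C_SYMBOL(x) x##_lts_20230125
    #else
    #define ABSL_INTERNAL_C_SYMBOL(x) x  // Plain name when inline namespaces are off.
    #endif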
diff --git a/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc b/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc
index 8cf59e6..24a6b54 100644
--- a/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc
@@ -159,7 +159,7 @@
   const absl::Duration elapsed = absl::Now() - start;
   // Allow for a slight early return, to account for quality of implementation
   // issues on various platforms.
-  const absl::Duration slop = absl::Microseconds(200);
+  const absl::Duration slop = absl::Milliseconds(1);
   EXPECT_LE(delay - slop, elapsed)
       << "Wait returned " << delay - elapsed
       << " early (with " << slop << " slop), start time was " << start;
@@ -174,6 +174,15 @@
   EXPECT_TRUE(Wait(negative_timeout));
 }
 
+TEST_F(PerThreadSemTest, ThreadIdentityReuse) {
+  // Create a base_internal::ThreadIdentity object and keep reusing it. There
+  // should be no memory or resource leaks.
+  for (int i = 0; i < 10000; i++) {
+    std::thread t([]() { GetOrCreateCurrentThreadIdentity(); });
+    t.join();
+  }
+}
+
 }  // namespace
 
 }  // namespace synchronization_internal
diff --git a/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc b/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc
new file mode 100644
index 0000000..bf700e9
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc
@@ -0,0 +1,167 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/pthread_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <cassert>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+namespace {
+class PthreadMutexHolder {
+ public:
+  explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+    const int err = pthread_mutex_lock(mu_);
+    if (err != 0) {
+      ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+    }
+  }
+
+  PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+  PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+  ~PthreadMutexHolder() {
+    const int err = pthread_mutex_unlock(mu_);
+    if (err != 0) {
+      ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+    }
+  }
+
+ private:
+  pthread_mutex_t *mu_;
+};
+}  // namespace
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char PthreadWaiter::kName[];
+#endif
+
+PthreadWaiter::PthreadWaiter() : waiter_count_(0), wakeup_count_(0) {
+  const int err = pthread_mutex_init(&mu_, 0);
+  if (err != 0) {
+    ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+  }
+
+  const int err2 = pthread_cond_init(&cv_, 0);
+  if (err2 != 0) {
+    ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+  }
+}
+
+#ifdef __APPLE__
+#define ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP 1
+#endif
+
+#if defined(__GLIBC__) && \
+    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#endif
+
+// Calls pthread_cond_timedwait() or possibly something else like
+// pthread_cond_timedwait_relative_np() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return
+// value of pthread_cond_timedwait().
+int PthreadWaiter::TimedWait(KernelTimeout t) {
+  assert(t.has_timeout());
+  if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#ifdef ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
+    const auto rel_timeout = t.MakeRelativeTimespec();
+    return pthread_cond_timedwait_relative_np(&cv_, &mu_, &rel_timeout);
+#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT) && \
+    defined(CLOCK_MONOTONIC)
+    const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+    return pthread_cond_clockwait(&cv_, &mu_, CLOCK_MONOTONIC,
+                                  &abs_clock_timeout);
+#endif
+  }
+
+  const auto abs_timeout = t.MakeAbsTimespec();
+  return pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+}
+
+bool PthreadWaiter::Wait(KernelTimeout t) {
+  PthreadMutexHolder h(&mu_);
+  ++waiter_count_;
+  // Loop until we find a wakeup to consume or timeout.
+  // Note that, since the thread ticker is just reset, we don't need to check
+  // whether the thread is idle on the very first pass of the loop.
+  bool first_pass = true;
+  while (wakeup_count_ == 0) {
+    if (!first_pass) MaybeBecomeIdle();
+    // No wakeups available, time to wait.
+    if (!t.has_timeout()) {
+      const int err = pthread_cond_wait(&cv_, &mu_);
+      if (err != 0) {
+        ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+      }
+    } else {
+      const int err = TimedWait(t);
+      if (err == ETIMEDOUT) {
+        --waiter_count_;
+        return false;
+      }
+      if (err != 0) {
+        ABSL_RAW_LOG(FATAL, "PthreadWaiter::TimedWait() failed: %d", err);
+      }
+    }
+    first_pass = false;
+  }
+  // Consume a wakeup and we're done.
+  --wakeup_count_;
+  --waiter_count_;
+  return true;
+}
+
+void PthreadWaiter::Post() {
+  PthreadMutexHolder h(&mu_);
+  ++wakeup_count_;
+  InternalCondVarPoke();
+}
+
+void PthreadWaiter::Poke() {
+  PthreadMutexHolder h(&mu_);
+  InternalCondVarPoke();
+}
+
+void PthreadWaiter::InternalCondVarPoke() {
+  if (waiter_count_ != 0) {
+    const int err = pthread_cond_signal(&cv_);
+    if (ABSL_PREDICT_FALSE(err != 0)) {
+      ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+    }
+  }
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_PTHREAD_WAITER
diff --git a/abseil-cpp/absl/synchronization/internal/pthread_waiter.h b/abseil-cpp/absl/synchronization/internal/pthread_waiter.h
new file mode 100644
index 0000000..206aefa
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/pthread_waiter.h
@@ -0,0 +1,60 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_PTHREAD_WAITER 1
+
+class PthreadWaiter : public WaiterCrtp<PthreadWaiter> {
+ public:
+  PthreadWaiter();
+
+  bool Wait(KernelTimeout t);
+  void Post();
+  void Poke();
+
+  static constexpr char kName[] = "PthreadWaiter";
+
+ private:
+  int TimedWait(KernelTimeout t);
+
+  // REQUIRES: mu_ must be held.
+  void InternalCondVarPoke();
+
+  pthread_mutex_t mu_;
+  pthread_cond_t cv_;
+  int waiter_count_;
+  int wakeup_count_;  // Unclaimed wakeups.
+};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ndef _WIN32
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
diff --git a/abseil-cpp/absl/synchronization/internal/sem_waiter.cc b/abseil-cpp/absl/synchronization/internal/sem_waiter.cc
new file mode 100644
index 0000000..d62dbdc
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/sem_waiter.cc
@@ -0,0 +1,122 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/sem_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_SEM_WAITER
+
+#include <semaphore.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char SemWaiter::kName[];
+#endif
+
+SemWaiter::SemWaiter() : wakeups_(0) {
+  if (sem_init(&sem_, 0, 0) != 0) {
+    ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+  }
+}
+
+#if defined(__GLIBC__) && \
+    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#endif
+
+// Calls sem_timedwait() or possibly something else like
+// sem_clockwait() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return
+// value of a call to sem_timedwait().
+int SemWaiter::TimedWait(KernelTimeout t) {
+  if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#if defined(ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT) && defined(CLOCK_MONOTONIC)
+    const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+    return sem_clockwait(&sem_, CLOCK_MONOTONIC, &abs_clock_timeout);
+#endif
+  }
+
+  const auto abs_timeout = t.MakeAbsTimespec();
+  return sem_timedwait(&sem_, &abs_timeout);
+}
+
+bool SemWaiter::Wait(KernelTimeout t) {
+  // Loop until we timeout or consume a wakeup.
+  // Note that, since the thread ticker is just reset, we don't need to check
+  // whether the thread is idle on the very first pass of the loop.
+  bool first_pass = true;
+  while (true) {
+    int x = wakeups_.load(std::memory_order_relaxed);
+    while (x != 0) {
+      if (!wakeups_.compare_exchange_weak(x, x - 1,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+        continue;  // Raced with someone, retry.
+      }
+      // Successfully consumed a wakeup, we're done.
+      return true;
+    }
+
+    if (!first_pass) MaybeBecomeIdle();
+    // Nothing to consume, wait (looping on EINTR).
+    while (true) {
+      if (!t.has_timeout()) {
+        if (sem_wait(&sem_) == 0) break;
+        if (errno == EINTR) continue;
+        ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+      } else {
+        if (TimedWait(t) == 0) break;
+        if (errno == EINTR) continue;
+        if (errno == ETIMEDOUT) return false;
+        ABSL_RAW_LOG(FATAL, "SemWaiter::TimedWait() failed: %d", errno);
+      }
+    }
+    first_pass = false;
+  }
+}
+
+void SemWaiter::Post() {
+  // Post a wakeup.
+  if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+    // We incremented from 0, need to wake a potential waiter.
+    Poke();
+  }
+}
+
+void SemWaiter::Poke() {
+  if (sem_post(&sem_) != 0) {  // Wake any semaphore waiter.
+    ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+  }
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_SEM_WAITER
diff --git a/abseil-cpp/absl/synchronization/internal/sem_waiter.h b/abseil-cpp/absl/synchronization/internal/sem_waiter.h
new file mode 100644
index 0000000..c22746f
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/sem_waiter.h
@@ -0,0 +1,65 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_SEM_WAITER 1
+
+class SemWaiter : public WaiterCrtp<SemWaiter> {
+ public:
+  SemWaiter();
+
+  bool Wait(KernelTimeout t);
+  void Post();
+  void Poke();
+
+  static constexpr char kName[] = "SemWaiter";
+
+ private:
+  int TimedWait(KernelTimeout t);
+
+  sem_t sem_;
+
+  // This seems superfluous, but for Poke() we need to cause spurious
+  // wakeups on the semaphore. Hence we can't actually use the
+  // semaphore's count.
+  std::atomic<int> wakeups_;
+};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_HAVE_SEMAPHORE_H
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
diff --git a/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc b/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc
new file mode 100644
index 0000000..355718a
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc
@@ -0,0 +1,91 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_STDCPP_WAITER
+
+#include <chrono>  // NOLINT(build/c++11)
+#include <condition_variable>  // NOLINT(build/c++11)
+#include <mutex>  // NOLINT(build/c++11)
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char StdcppWaiter::kName[];
+#endif
+
+StdcppWaiter::StdcppWaiter() : waiter_count_(0), wakeup_count_(0) {}
+
+bool StdcppWaiter::Wait(KernelTimeout t) {
+  std::unique_lock<std::mutex> lock(mu_);
+  ++waiter_count_;
+
+  // Loop until we find a wakeup to consume or timeout.
+  // Note that, since the thread ticker is just reset, we don't need to check
+  // whether the thread is idle on the very first pass of the loop.
+  bool first_pass = true;
+  while (wakeup_count_ == 0) {
+    if (!first_pass) MaybeBecomeIdle();
+    // No wakeups available, time to wait.
+    if (!t.has_timeout()) {
+      cv_.wait(lock);
+    } else {
+      auto wait_result = t.SupportsSteadyClock() && t.is_relative_timeout()
+                             ? cv_.wait_for(lock, t.ToChronoDuration())
+                             : cv_.wait_until(lock, t.ToChronoTimePoint());
+      if (wait_result == std::cv_status::timeout) {
+        --waiter_count_;
+        return false;
+      }
+    }
+    first_pass = false;
+  }
+
+  // Consume a wakeup and we're done.
+  --wakeup_count_;
+  --waiter_count_;
+  return true;
+}
+
+void StdcppWaiter::Post() {
+  std::lock_guard<std::mutex> lock(mu_);
+  ++wakeup_count_;
+  InternalCondVarPoke();
+}
+
+void StdcppWaiter::Poke() {
+  std::lock_guard<std::mutex> lock(mu_);
+  InternalCondVarPoke();
+}
+
+void StdcppWaiter::InternalCondVarPoke() {
+  if (waiter_count_ != 0) {
+    cv_.notify_one();
+  }
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_STDCPP_WAITER
diff --git a/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.h b/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.h
new file mode 100644
index 0000000..e592a27
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.h
@@ -0,0 +1,56 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+
+#include <condition_variable>  // NOLINT(build/c++11)
+#include <mutex>  // NOLINT(build/c++11)
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_STDCPP_WAITER 1
+
+class StdcppWaiter : public WaiterCrtp<StdcppWaiter> {
+ public:
+  StdcppWaiter();
+
+  bool Wait(KernelTimeout t);
+  void Post();
+  void Poke();
+
+  static constexpr char kName[] = "StdcppWaiter";
+
+ private:
+  // REQUIRES: mu_ must be held.
+  void InternalCondVarPoke();
+
+  std::mutex mu_;
+  std::condition_variable cv_;
+  int waiter_count_;
+  int wakeup_count_;  // Unclaimed wakeups.
+};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
diff --git a/abseil-cpp/absl/synchronization/internal/thread_pool.h b/abseil-cpp/absl/synchronization/internal/thread_pool.h
index 0cb96da..5eb0bb6 100644
--- a/abseil-cpp/absl/synchronization/internal/thread_pool.h
+++ b/abseil-cpp/absl/synchronization/internal/thread_pool.h
@@ -20,9 +20,11 @@
 #include <functional>
 #include <queue>
 #include <thread>  // NOLINT(build/c++11)
+#include <utility>
 #include <vector>
 
 #include "absl/base/thread_annotations.h"
+#include "absl/functional/any_invocable.h"
 #include "absl/synchronization/mutex.h"
 
 namespace absl {
@@ -33,6 +35,7 @@
 class ThreadPool {
  public:
   explicit ThreadPool(int num_threads) {
+    threads_.reserve(num_threads);
     for (int i = 0; i < num_threads; ++i) {
       threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
     }
@@ -54,7 +57,7 @@
   }
 
   // Schedule a function to be run on a ThreadPool thread immediately.
-  void Schedule(std::function<void()> func) {
+  void Schedule(absl::AnyInvocable<void()> func) {
     assert(func != nullptr);
     absl::MutexLock l(&mu_);
     queue_.push(std::move(func));
@@ -67,7 +70,7 @@
 
   void WorkLoop() {
     while (true) {
-      std::function<void()> func;
+      absl::AnyInvocable<void()> func;
       {
         absl::MutexLock l(&mu_);
         mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
@@ -82,7 +85,7 @@
   }
 
   absl::Mutex mu_;
-  std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
+  std::queue<absl::AnyInvocable<void()>> queue_ ABSL_GUARDED_BY(mu_);
   std::vector<std::thread> threads_;
 };
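Switching the queue element type from std::function<void()> to absl::AnyInvocable<void()> lets Schedule() accept move-only callables, which std::function cannot hold. A minimal, illustrative use (ThreadPool is a test-support class, not public API):

    #include <memory>
    #include "absl/synchronization/internal/thread_pool.h"

    void Example() {
      absl::synchronization_internal::ThreadPool pool(4);
      auto payload = std::make_unique<int>(42);
      // The lambda captures a std::unique_ptr, so it is move-only;
      // absl::AnyInvocable accepts it where std::function would not compile.
      pool.Schedule([p = std::move(payload)]() { int unused = *p; (void)unused; });
    }  // ~ThreadPool() drains the queue and joins the worker threads.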
 
diff --git a/abseil-cpp/absl/synchronization/internal/waiter.cc b/abseil-cpp/absl/synchronization/internal/waiter.cc
deleted file mode 100644
index b6150b9..0000000
--- a/abseil-cpp/absl/synchronization/internal/waiter.cc
+++ /dev/null
@@ -1,492 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/waiter.h"
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
-  base_internal::ThreadIdentity *identity =
-      base_internal::CurrentThreadIdentityIfPresent();
-  assert(identity != nullptr);
-  const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
-  const int ticker = identity->ticker.load(std::memory_order_relaxed);
-  const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
-  if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
-    identity->is_idle.store(true, std::memory_order_relaxed);
-  }
-}
-
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-
-// Some Android headers are missing these definitions even though they
-// support these futex operations.
-#ifdef __BIONIC__
-#ifndef SYS_futex
-#define SYS_futex __NR_futex
-#endif
-#ifndef FUTEX_WAIT_BITSET
-#define FUTEX_WAIT_BITSET 9
-#endif
-#ifndef FUTEX_PRIVATE_FLAG
-#define FUTEX_PRIVATE_FLAG 128
-#endif
-#ifndef FUTEX_CLOCK_REALTIME
-#define FUTEX_CLOCK_REALTIME 256
-#endif
-#ifndef FUTEX_BITSET_MATCH_ANY
-#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
-#endif
-#endif
-
-#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
-#define SYS_futex_time64 __NR_futex_time64
-#endif
-
-#if defined(SYS_futex_time64) && !defined(SYS_futex)
-#define SYS_futex SYS_futex_time64
-#endif
-
-class Futex {
- public:
-  static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
-                       KernelTimeout t) {
-    int err = 0;
-    if (t.has_timeout()) {
-      // https://locklessinc.com/articles/futex_cheat_sheet/
-      // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
-      struct timespec abs_timeout = t.MakeAbsTimespec();
-      // Atomically check that the futex value is still 0, and if it
-      // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
-      err = syscall(
-          SYS_futex, reinterpret_cast<int32_t *>(v),
-          FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
-          &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
-    } else {
-      // Atomically check that the futex value is still 0, and if it
-      // is, sleep until woken by FUTEX_WAKE.
-      err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                    FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
-    }
-    if (err != 0) {
-      err = -errno;
-    }
-    return err;
-  }
-
-  static int Wake(std::atomic<int32_t> *v, int32_t count) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
-    if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
-    }
-    return err;
-  }
-};
-
-Waiter::Waiter() {
-  futex_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
-  // Loop until we can atomically decrement futex from a positive
-  // value, waiting on a futex while we believe it is zero.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (true) {
-    int32_t x = futex_.load(std::memory_order_relaxed);
-    while (x != 0) {
-      if (!futex_.compare_exchange_weak(x, x - 1,
-                                        std::memory_order_acquire,
-                                        std::memory_order_relaxed)) {
-        continue;  // Raced with someone, retry.
-      }
-      return true;  // Consumed a wakeup, we are done.
-    }
-
-
-    if (!first_pass) MaybeBecomeIdle();
-    const int err = Futex::WaitUntil(&futex_, 0, t);
-    if (err != 0) {
-      if (err == -EINTR || err == -EWOULDBLOCK) {
-        // Do nothing, the loop will retry.
-      } else if (err == -ETIMEDOUT) {
-        return false;
-      } else {
-        ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
-      }
-    }
-    first_pass = false;
-  }
-}
-
-void Waiter::Post() {
-  if (futex_.fetch_add(1, std::memory_order_release) == 0) {
-    // We incremented from 0, need to wake a potential waiter.
-    Poke();
-  }
-}
-
-void Waiter::Poke() {
-  // Wake one thread waiting on the futex.
-  const int err = Futex::Wake(&futex_, 1);
-  if (ABSL_PREDICT_FALSE(err < 0)) {
-    ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
-  }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
-  explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
-    const int err = pthread_mutex_lock(mu_);
-    if (err != 0) {
-      ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
-    }
-  }
-
-  PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
-  PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
-  ~PthreadMutexHolder() {
-    const int err = pthread_mutex_unlock(mu_);
-    if (err != 0) {
-      ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
-    }
-  }
-
- private:
-  pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
-  const int err = pthread_mutex_init(&mu_, 0);
-  if (err != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
-  }
-
-  const int err2 = pthread_cond_init(&cv_, 0);
-  if (err2 != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
-  }
-
-  waiter_count_ = 0;
-  wakeup_count_ = 0;
-}
-
-Waiter::~Waiter() {
-  const int err = pthread_mutex_destroy(&mu_);
-  if (err != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
-  }
-
-  const int err2 = pthread_cond_destroy(&cv_);
-  if (err2 != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
-  }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
-  struct timespec abs_timeout;
-  if (t.has_timeout()) {
-    abs_timeout = t.MakeAbsTimespec();
-  }
-
-  PthreadMutexHolder h(&mu_);
-  ++waiter_count_;
-  // Loop until we find a wakeup to consume or timeout.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (wakeup_count_ == 0) {
-    if (!first_pass) MaybeBecomeIdle();
-    // No wakeups available, time to wait.
-    if (!t.has_timeout()) {
-      const int err = pthread_cond_wait(&cv_, &mu_);
-      if (err != 0) {
-        ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
-      }
-    } else {
-      const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
-      if (err == ETIMEDOUT) {
-        --waiter_count_;
-        return false;
-      }
-      if (err != 0) {
-        ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
-      }
-    }
-    first_pass = false;
-  }
-  // Consume a wakeup and we're done.
-  --wakeup_count_;
-  --waiter_count_;
-  return true;
-}
-
-void Waiter::Post() {
-  PthreadMutexHolder h(&mu_);
-  ++wakeup_count_;
-  InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
-  PthreadMutexHolder h(&mu_);
-  InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
-  if (waiter_count_ != 0) {
-    const int err = pthread_cond_signal(&cv_);
-    if (ABSL_PREDICT_FALSE(err != 0)) {
-      ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
-    }
-  }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
-  if (sem_init(&sem_, 0, 0) != 0) {
-    ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
-  }
-  wakeups_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() {
-  if (sem_destroy(&sem_) != 0) {
-    ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
-  }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
-  struct timespec abs_timeout;
-  if (t.has_timeout()) {
-    abs_timeout = t.MakeAbsTimespec();
-  }
-
-  // Loop until we timeout or consume a wakeup.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (true) {
-    int x = wakeups_.load(std::memory_order_relaxed);
-    while (x != 0) {
-      if (!wakeups_.compare_exchange_weak(x, x - 1,
-                                          std::memory_order_acquire,
-                                          std::memory_order_relaxed)) {
-        continue;  // Raced with someone, retry.
-      }
-      // Successfully consumed a wakeup, we're done.
-      return true;
-    }
-
-    if (!first_pass) MaybeBecomeIdle();
-    // Nothing to consume, wait (looping on EINTR).
-    while (true) {
-      if (!t.has_timeout()) {
-        if (sem_wait(&sem_) == 0) break;
-        if (errno == EINTR) continue;
-        ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
-      } else {
-        if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
-        if (errno == EINTR) continue;
-        if (errno == ETIMEDOUT) return false;
-        ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
-      }
-    }
-    first_pass = false;
-  }
-}
-
-void Waiter::Post() {
-  // Post a wakeup.
-  if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
-    // We incremented from 0, need to wake a potential waiter.
-    Poke();
-  }
-}
-
-void Waiter::Poke() {
-  if (sem_post(&sem_) != 0) {  // Wake any semaphore waiter.
-    ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
-  }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
-  static SRWLOCK *GetLock(Waiter *w) {
-    return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
-  }
-
-  static CONDITION_VARIABLE *GetCond(Waiter *w) {
-    return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
-  }
-
-  static_assert(sizeof(SRWLOCK) == sizeof(void *),
-                "`mu_storage_` does not have the same size as SRWLOCK");
-  static_assert(alignof(SRWLOCK) == alignof(void *),
-                "`mu_storage_` does not have the same alignment as SRWLOCK");
-
-  static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
-                "`cv_storage_` does not have the same size "
-                "as `CONDITION_VARIABLE`");
-  static_assert(
-      alignof(CONDITION_VARIABLE) == alignof(void *),
-      "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
-  // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
-  // and destructible because we never call their constructors or destructors.
-  static_assert(std::is_trivially_constructible<SRWLOCK>::value,
-                "The `SRWLOCK` type must be trivially constructible");
-  static_assert(
-      std::is_trivially_constructible<CONDITION_VARIABLE>::value,
-      "The `CONDITION_VARIABLE` type must be trivially constructible");
-  static_assert(std::is_trivially_destructible<SRWLOCK>::value,
-                "The `SRWLOCK` type must be trivially destructible");
-  static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
-                "The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
-  explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
-    AcquireSRWLockExclusive(mu_);
-  }
-
-  LockHolder(const LockHolder&) = delete;
-  LockHolder& operator=(const LockHolder&) = delete;
-
-  ~LockHolder() {
-    ReleaseSRWLockExclusive(mu_);
-  }
-
- private:
-  SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
-  auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
-  auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
-  InitializeSRWLock(mu);
-  InitializeConditionVariable(cv);
-  waiter_count_ = 0;
-  wakeup_count_ = 0;
-}
-
-// SRW locks and condition variables do not need to be explicitly destroyed.
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
-// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
-  SRWLOCK *mu = WinHelper::GetLock(this);
-  CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
-  LockHolder h(mu);
-  ++waiter_count_;
-
-  // Loop until we find a wakeup to consume or timeout.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (wakeup_count_ == 0) {
-    if (!first_pass) MaybeBecomeIdle();
-    // No wakeups available, time to wait.
-    if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
-      // GetLastError() returns a Win32 DWORD, but we assign to
-      // unsigned long to simplify the ABSL_RAW_LOG case below.  The uniform
-      // initialization guarantees this is not a narrowing conversion.
-      const unsigned long err{GetLastError()};  // NOLINT(runtime/int)
-      if (err == ERROR_TIMEOUT) {
-        --waiter_count_;
-        return false;
-      } else {
-        ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
-      }
-    }
-    first_pass = false;
-  }
-  // Consume a wakeup and we're done.
-  --wakeup_count_;
-  --waiter_count_;
-  return true;
-}
-
-void Waiter::Post() {
-  LockHolder h(WinHelper::GetLock(this));
-  ++wakeup_count_;
-  InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
-  LockHolder h(WinHelper::GetLock(this));
-  InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
-  if (waiter_count_ != 0) {
-    WakeConditionVariable(WinHelper::GetCond(this));
-  }
-}
-
-#else
-#error Unknown ABSL_WAITER_MODE
-#endif
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/abseil-cpp/absl/synchronization/internal/waiter.h b/abseil-cpp/absl/synchronization/internal/waiter.h
index 887f9b1..1a8b0b8 100644
--- a/abseil-cpp/absl/synchronization/internal/waiter.h
+++ b/abseil-cpp/absl/synchronization/internal/waiter.h
@@ -17,140 +17,48 @@
 #define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
 
 #include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex_waiter.h"
+#include "absl/synchronization/internal/pthread_waiter.h"
+#include "absl/synchronization/internal/sem_waiter.h"
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+#include "absl/synchronization/internal/win32_waiter.h"
 
 // May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
 #define ABSL_WAITER_MODE_FUTEX 0
 #define ABSL_WAITER_MODE_SEM 1
 #define ABSL_WAITER_MODE_CONDVAR 2
 #define ABSL_WAITER_MODE_WIN32 3
+#define ABSL_WAITER_MODE_STDCPP 4
 
 #if defined(ABSL_FORCE_WAITER_MODE)
 #define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#elif defined(ABSL_INTERNAL_HAVE_WIN32_WAITER)
 #define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(__BIONIC__)
-// Bionic supports all the futex operations we need even when some of the futex
-// definitions are missing.
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX_WAITER)
 #define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
-// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#elif defined(ABSL_INTERNAL_HAVE_SEM_WAITER)
 #define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
-#else
+#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_WAITER)
 #define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#else
+#error ABSL_WAITER_MODE is undefined
 #endif
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace synchronization_internal {
 
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
-  // Prepare any data to track waits.
-  Waiter();
-
-  // Not copyable or movable
-  Waiter(const Waiter&) = delete;
-  Waiter& operator=(const Waiter&) = delete;
-
-  // Destroy any data to track waits.
-  ~Waiter();
-
-  // Blocks the calling thread until a matching call to `Post()` or
-  // `t` has passed. Returns `true` if woken (`Post()` called),
-  // `false` on timeout.
-  bool Wait(KernelTimeout t);
-
-  // Restart the caller of `Wait()` as with a normal semaphore.
-  void Post();
-
-  // If anyone is waiting, wake them up temporarily and cause them to
-  // call `MaybeBecomeIdle()`. They will then return to waiting for a
-  // `Post()` or timeout.
-  void Poke();
-
-  // Returns the Waiter associated with the identity.
-  static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
-    static_assert(
-        sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
-        "Insufficient space for Waiter");
-    return reinterpret_cast<Waiter*>(identity->waiter_state.data);
-  }
-
-  // How many periods to remain idle before releasing resources
-#ifndef ABSL_HAVE_THREAD_SANITIZER
-  static constexpr int kIdlePeriods = 60;
-#else
-  // Memory consumption under ThreadSanitizer is a serious concern,
-  // so we release resources sooner. The value of 1 leads to 1 to 2 second
-  // delay before marking a thread as idle.
-  static const int kIdlePeriods = 1;
-#endif
-
- private:
 #if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-  // Futexes are defined by specification to be 32-bits.
-  // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
-  std::atomic<int32_t> futex_;
-  static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-  // REQUIRES: mu_ must be held.
-  void InternalCondVarPoke();
-
-  pthread_mutex_t mu_;
-  pthread_cond_t cv_;
-  int waiter_count_;
-  int wakeup_count_;  // Unclaimed wakeups.
-
+using Waiter = FutexWaiter;
 #elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-  sem_t sem_;
-  // This seems superfluous, but for Poke() we need to cause spurious
-  // wakeups on the semaphore. Hence we can't actually use the
-  // semaphore's count.
-  std::atomic<int> wakeups_;
-
+using Waiter = SemWaiter;
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+using Waiter = PthreadWaiter;
 #elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-  // WinHelper - Used to define utilities for accessing the lock and
-  // condition variable storage once the types are complete.
-  class WinHelper;
-
-  // REQUIRES: WinHelper::GetLock(this) must be held.
-  void InternalCondVarPoke();
-
-  // We can't include Windows.h in our headers, so we use aligned character
-  // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
-  alignas(void*) unsigned char mu_storage_[sizeof(void*)];
-  alignas(void*) unsigned char cv_storage_[sizeof(void*)];
-  int waiter_count_;
-  int wakeup_count_;
-
-#else
-  #error Unknown ABSL_WAITER_MODE
+using Waiter = Win32Waiter;
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_STDCPP
+using Waiter = StdcppWaiter;
 #endif
-};
 
 }  // namespace synchronization_internal
 ABSL_NAMESPACE_END
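With Waiter reduced to a type alias, call sites resolve the platform implementation at compile time. For illustration (mirroring the Post() path in per_thread_sem.cc above; this is not code added by the change):

    // Illustrative: Waiter::GetWaiter() returns the per-thread instance stored
    // in ThreadIdentity::WaiterState, and Post() statically dispatches to
    // FutexWaiter, SemWaiter, PthreadWaiter, Win32Waiter, or StdcppWaiter.
    void PostTo(absl::base_internal::ThreadIdentity* identity) {
      absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
    }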
diff --git a/abseil-cpp/absl/synchronization/internal/waiter_base.cc b/abseil-cpp/absl/synchronization/internal/waiter_base.cc
new file mode 100644
index 0000000..46928b4
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/waiter_base.cc
@@ -0,0 +1,42 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter_base.h"
+
+#include "absl/base/config.h"
+#include "absl/base/internal/thread_identity.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int WaiterBase::kIdlePeriods;
+#endif
+
+void WaiterBase::MaybeBecomeIdle() {
+  base_internal::ThreadIdentity *identity =
+      base_internal::CurrentThreadIdentityIfPresent();
+  assert(identity != nullptr);
+  const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+  const int ticker = identity->ticker.load(std::memory_order_relaxed);
+  const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+  if (!is_idle && ticker - wait_start > kIdlePeriods) {
+    identity->is_idle.store(true, std::memory_order_relaxed);
+  }
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/abseil-cpp/absl/synchronization/internal/waiter_base.h b/abseil-cpp/absl/synchronization/internal/waiter_base.h
new file mode 100644
index 0000000..cf17548
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/waiter_base.h
@@ -0,0 +1,90 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+
+#include "absl/base/config.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// `Waiter` is a platform-specific semaphore implementation that `PerThreadSem`
+// waits on to implement blocking in `absl::Mutex`.  Implementations should
+// inherit from `WaiterCrtp` and must implement `Wait()`, `Post()`, and `Poke()`
+// as described in `WaiterBase`.  `waiter.h` selects the implementation and uses
+// static dispatch for performance.
+class WaiterBase {
+ public:
+  WaiterBase() = default;
+
+  // Not copyable or movable
+  WaiterBase(const WaiterBase&) = delete;
+  WaiterBase& operator=(const WaiterBase&) = delete;
+
+  // Blocks the calling thread until a matching call to `Post()` or
+  // `t` has passed. Returns `true` if woken (`Post()` called),
+  // `false` on timeout.
+  //
+  // bool Wait(KernelTimeout t);
+
+  // Restart the caller of `Wait()` as with a normal semaphore.
+  //
+  // void Post();
+
+  // If anyone is waiting, wake them up temporarily and cause them to
+  // call `MaybeBecomeIdle()`. They will then return to waiting for a
+  // `Post()` or timeout.
+  //
+  // void Poke();
+
+  // Returns the name of this implementation. Used only for debugging.
+  //
+  // static constexpr char kName[];
+
+  // How many periods to remain idle before releasing resources
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+  static constexpr int kIdlePeriods = 60;
+#else
+  // Memory consumption under ThreadSanitizer is a serious concern,
+  // so we release resources sooner. The value of 1 leads to a 1 to 2 second
+  // delay before marking a thread as idle.
+  static constexpr int kIdlePeriods = 1;
+#endif
+
+ protected:
+  static void MaybeBecomeIdle();
+};
+
+template <typename T>
+class WaiterCrtp : public WaiterBase {
+ public:
+  // Returns the Waiter associated with the identity.
+  static T* GetWaiter(base_internal::ThreadIdentity* identity) {
+    static_assert(
+        sizeof(T) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+        "Insufficient space for Waiter");
+    return reinterpret_cast<T*>(identity->waiter_state.data);
+  }
+};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
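The comment block in waiter_base.h above describes an implicit interface: each platform waiter derives from WaiterCrtp<T>, supplies Wait(), Post(), Poke(), and kName, and waiter.h binds callers to one concrete type at compile time instead of through virtual dispatch. As a rough illustration of that CRTP/static-dispatch shape (not part of the patch; WaiterCrtpSketch and MyWaiter are made-up stand-ins, C++17):

#include <iostream>

template <typename T>
class WaiterCrtpSketch {
 public:
  // No virtual functions: the concrete type is known at compile time, so
  // calls to Wait()/Post()/Poke() resolve statically on the derived class.
  T* self() { return static_cast<T*>(this); }
};

class MyWaiter : public WaiterCrtpSketch<MyWaiter> {
 public:
  static constexpr char kName[] = "MyWaiter";
  bool Wait() { return true; }  // a real waiter blocks until Post() or timeout
  void Post() {}                // a real waiter releases one blocked thread
  void Poke() {}                // a real waiter briefly wakes blocked threads
};

// waiter.h plays the role of this alias: it picks one implementation per
// platform, and everything else is compiled directly against that type.
using Waiter = MyWaiter;

int main() {
  Waiter w;
  w.Post();
  w.Poke();
  std::cout << Waiter::kName << " woken=" << w.Wait() << "\n";
}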
diff --git a/abseil-cpp/absl/synchronization/internal/waiter_test.cc b/abseil-cpp/absl/synchronization/internal/waiter_test.cc
new file mode 100644
index 0000000..992db29
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/waiter_test.cc
@@ -0,0 +1,180 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter.h"
+
+#include <ctime>
+#include <iostream>
+#include <ostream>
+
+#include "absl/base/config.h"
+#include "absl/random/random.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/futex_waiter.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/pthread_waiter.h"
+#include "absl/synchronization/internal/sem_waiter.h"
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/internal/win32_waiter.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// Test go/btm support by randomizing the value of clock_gettime() for
+// CLOCK_MONOTONIC. This works by overriding a weak symbol in glibc.
+// We should be resistant to this randomization when !SupportsSteadyClock().
+#if defined(__GOOGLE_GRTE_VERSION__) &&      \
+    !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    !defined(ABSL_HAVE_MEMORY_SANITIZER) &&  \
+    !defined(ABSL_HAVE_THREAD_SANITIZER)
+extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
+
+extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
+  if (c == CLOCK_MONOTONIC &&
+      !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
+    absl::SharedBitGen gen;
+    ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
+    ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
+    return 0;
+  }
+  return __clock_gettime(c, ts);
+}
+#endif
+
+namespace {
+
+TEST(Waiter, PrintPlatformImplementation) {
+  // Allows us to verify that the platform is using the expected implementation.
+  std::cout << absl::synchronization_internal::Waiter::kName << std::endl;
+}
+
+template <typename T>
+class WaiterTest : public ::testing::Test {
+ public:
+  // Waiter implementations assume that a ThreadIdentity has already been
+  // created.
+  WaiterTest() {
+    absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+  }
+};
+
+TYPED_TEST_SUITE_P(WaiterTest);
+
+absl::Duration WithTolerance(absl::Duration d) { return d * 0.95; }
+
+TYPED_TEST_P(WaiterTest, WaitNoTimeout) {
+  absl::synchronization_internal::ThreadPool tp(1);
+  TypeParam waiter;
+  tp.Schedule([&]() {
+    // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+    // from `Wait()`.
+    waiter.Poke();
+    absl::SleepFor(absl::Seconds(1));
+    waiter.Poke();
+    absl::SleepFor(absl::Seconds(1));
+    waiter.Post();
+  });
+  absl::Time start = absl::Now();
+  EXPECT_TRUE(
+      waiter.Wait(absl::synchronization_internal::KernelTimeout::Never()));
+  absl::Duration waited = absl::Now() - start;
+  EXPECT_GE(waited, WithTolerance(absl::Seconds(2)));
+}
+
+TYPED_TEST_P(WaiterTest, WaitDurationWoken) {
+  absl::synchronization_internal::ThreadPool tp(1);
+  TypeParam waiter;
+  tp.Schedule([&]() {
+    // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+    // from `Wait()`.
+    waiter.Poke();
+    absl::SleepFor(absl::Milliseconds(500));
+    waiter.Post();
+  });
+  absl::Time start = absl::Now();
+  EXPECT_TRUE(waiter.Wait(
+      absl::synchronization_internal::KernelTimeout(absl::Seconds(10))));
+  absl::Duration waited = absl::Now() - start;
+  EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+  EXPECT_LT(waited, absl::Seconds(2));
+}
+
+TYPED_TEST_P(WaiterTest, WaitTimeWoken) {
+  absl::synchronization_internal::ThreadPool tp(1);
+  TypeParam waiter;
+  tp.Schedule([&]() {
+    // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+    // from `Wait()`.
+    waiter.Poke();
+    absl::SleepFor(absl::Milliseconds(500));
+    waiter.Post();
+  });
+  absl::Time start = absl::Now();
+  EXPECT_TRUE(waiter.Wait(absl::synchronization_internal::KernelTimeout(
+      start + absl::Seconds(10))));
+  absl::Duration waited = absl::Now() - start;
+  EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+  EXPECT_LT(waited, absl::Seconds(2));
+}
+
+TYPED_TEST_P(WaiterTest, WaitDurationReached) {
+  TypeParam waiter;
+  absl::Time start = absl::Now();
+  EXPECT_FALSE(waiter.Wait(
+      absl::synchronization_internal::KernelTimeout(absl::Milliseconds(500))));
+  absl::Duration waited = absl::Now() - start;
+  EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+  EXPECT_LT(waited, absl::Seconds(1));
+}
+
+TYPED_TEST_P(WaiterTest, WaitTimeReached) {
+  TypeParam waiter;
+  absl::Time start = absl::Now();
+  EXPECT_FALSE(waiter.Wait(absl::synchronization_internal::KernelTimeout(
+      start + absl::Milliseconds(500))));
+  absl::Duration waited = absl::Now() - start;
+  EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+  EXPECT_LT(waited, absl::Seconds(1));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(WaiterTest,
+                            WaitNoTimeout,
+                            WaitDurationWoken,
+                            WaitTimeWoken,
+                            WaitDurationReached,
+                            WaitTimeReached);
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Futex, WaiterTest,
+                               absl::synchronization_internal::FutexWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Pthread, WaiterTest,
+                               absl::synchronization_internal::PthreadWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_SEM_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Sem, WaiterTest,
+                               absl::synchronization_internal::SemWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_WIN32_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Win32, WaiterTest,
+                               absl::synchronization_internal::Win32Waiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_STDCPP_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Stdcpp, WaiterTest,
+                               absl::synchronization_internal::StdcppWaiter);
+#endif
+
+}  // namespace
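waiter_test.cc above runs one set of test bodies against every available waiter implementation by combining TYPED_TEST_SUITE_P, REGISTER_TYPED_TEST_SUITE_P, and one INSTANTIATE_TYPED_TEST_SUITE_P per platform type. For readers unfamiliar with that googletest machinery, here is a reduced, self-contained sketch of the same pattern (toy types and test names, unrelated to the patch):

#include <string>
#include <vector>

#include "gtest/gtest.h"

template <typename T>
class ContainerTest : public ::testing::Test {};

// Declare a type-parameterized suite, then define its tests with TYPED_TEST_P.
TYPED_TEST_SUITE_P(ContainerTest);

TYPED_TEST_P(ContainerTest, StartsEmpty) {
  TypeParam c;
  EXPECT_TRUE(c.empty());
}

// Every TYPED_TEST_P must be registered before it can be instantiated.
REGISTER_TYPED_TEST_SUITE_P(ContainerTest, StartsEmpty);

// Each instantiation stamps the registered tests out for one concrete type,
// just as waiter_test.cc does once per available Waiter implementation.
INSTANTIATE_TYPED_TEST_SUITE_P(Vector, ContainerTest, std::vector<int>);
INSTANTIATE_TYPED_TEST_SUITE_P(String, ContainerTest, std::string);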
diff --git a/abseil-cpp/absl/synchronization/internal/win32_waiter.cc b/abseil-cpp/absl/synchronization/internal/win32_waiter.cc
new file mode 100644
index 0000000..bd95ff0
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/win32_waiter.cc
@@ -0,0 +1,151 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/win32_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_WIN32_WAITER
+
+#include <windows.h>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char Win32Waiter::kName[];
+#endif
+
+class Win32Waiter::WinHelper {
+ public:
+  static SRWLOCK *GetLock(Win32Waiter *w) {
+    return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+  }
+
+  static CONDITION_VARIABLE *GetCond(Win32Waiter *w) {
+    return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+  }
+
+  static_assert(sizeof(SRWLOCK) == sizeof(void *),
+                "`mu_storage_` does not have the same size as SRWLOCK");
+  static_assert(alignof(SRWLOCK) == alignof(void *),
+                "`mu_storage_` does not have the same alignment as SRWLOCK");
+
+  static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
+                "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
+                "as `CONDITION_VARIABLE`");
+  static_assert(
+      alignof(CONDITION_VARIABLE) == alignof(void *),
+      "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
+
+  // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+  // and destructible because we never call their constructors or destructors.
+  static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+                "The `SRWLOCK` type must be trivially constructible");
+  static_assert(
+      std::is_trivially_constructible<CONDITION_VARIABLE>::value,
+      "The `CONDITION_VARIABLE` type must be trivially constructible");
+  static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+                "The `SRWLOCK` type must be trivially destructible");
+  static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+                "The `CONDITION_VARIABLE` type must be trivially destructible");
+};
+
+class LockHolder {
+ public:
+  explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+    AcquireSRWLockExclusive(mu_);
+  }
+
+  LockHolder(const LockHolder&) = delete;
+  LockHolder& operator=(const LockHolder&) = delete;
+
+  ~LockHolder() {
+    ReleaseSRWLockExclusive(mu_);
+  }
+
+ private:
+  SRWLOCK* mu_;
+};
+
+Win32Waiter::Win32Waiter() {
+  auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+  auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+  InitializeSRWLock(mu);
+  InitializeConditionVariable(cv);
+  waiter_count_ = 0;
+  wakeup_count_ = 0;
+}
+
+bool Win32Waiter::Wait(KernelTimeout t) {
+  SRWLOCK *mu = WinHelper::GetLock(this);
+  CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+  LockHolder h(mu);
+  ++waiter_count_;
+
+  // Loop until we find a wakeup to consume or timeout.
+  // Note that, since the thread ticker is just reset, we don't need to check
+  // whether the thread is idle on the very first pass of the loop.
+  bool first_pass = true;
+  while (wakeup_count_ == 0) {
+    if (!first_pass) MaybeBecomeIdle();
+    // No wakeups available, time to wait.
+    if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+      // GetLastError() returns a Win32 DWORD, but we assign to
+      // unsigned long to simplify the ABSL_RAW_LOG case below.  The uniform
+      // initialization guarantees this is not a narrowing conversion.
+      const unsigned long err{GetLastError()};  // NOLINT(runtime/int)
+      if (err == ERROR_TIMEOUT) {
+        --waiter_count_;
+        return false;
+      } else {
+        ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+      }
+    }
+    first_pass = false;
+  }
+  // Consume a wakeup and we're done.
+  --wakeup_count_;
+  --waiter_count_;
+  return true;
+}
+
+void Win32Waiter::Post() {
+  LockHolder h(WinHelper::GetLock(this));
+  ++wakeup_count_;
+  InternalCondVarPoke();
+}
+
+void Win32Waiter::Poke() {
+  LockHolder h(WinHelper::GetLock(this));
+  InternalCondVarPoke();
+}
+
+void Win32Waiter::InternalCondVarPoke() {
+  if (waiter_count_ != 0) {
+    WakeConditionVariable(WinHelper::GetCond(this));
+  }
+}
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_HAVE_WIN32_WAITER
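Structurally, Win32Waiter above is a counting semaphore built from a lock and a condition variable: Post() increments wakeup_count_ and pokes the condition variable, and Wait() loops until it can consume a wakeup or the timeout fires. A portable sketch of that same bookkeeping using std::mutex/std::condition_variable (illustrative only; the class name is hypothetical and the timeout is simplified to a steady_clock deadline):

#include <chrono>
#include <condition_variable>
#include <mutex>

class CondVarSemaphore {
 public:
  // Returns true if a wakeup was consumed, false if the deadline passed first.
  bool Wait(std::chrono::steady_clock::time_point deadline) {
    std::unique_lock<std::mutex> lock(mu_);
    ++waiter_count_;
    while (wakeup_count_ == 0) {
      if (cv_.wait_until(lock, deadline) == std::cv_status::timeout &&
          wakeup_count_ == 0) {
        --waiter_count_;
        return false;  // timed out without a wakeup to consume
      }
    }
    --wakeup_count_;  // consume exactly one wakeup
    --waiter_count_;
    return true;
  }

  void Post() {
    std::lock_guard<std::mutex> lock(mu_);
    ++wakeup_count_;
    // Only signal if someone is actually waiting, mirroring
    // InternalCondVarPoke() in the patch above.
    if (waiter_count_ != 0) cv_.notify_one();
  }

  void Poke() {
    std::lock_guard<std::mutex> lock(mu_);
    // Wakes waiters without granting a wakeup; the while loop in Wait()
    // simply puts them back to sleep.
    if (waiter_count_ != 0) cv_.notify_one();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int waiter_count_ = 0;
  int wakeup_count_ = 0;
};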
diff --git a/abseil-cpp/absl/synchronization/internal/win32_waiter.h b/abseil-cpp/absl/synchronization/internal/win32_waiter.h
new file mode 100644
index 0000000..87eb617
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/internal/win32_waiter.h
@@ -0,0 +1,70 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#endif
+
+#if defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_WIN32_WAITER 1
+
+class Win32Waiter : public WaiterCrtp<Win32Waiter> {
+ public:
+  Win32Waiter();
+
+  bool Wait(KernelTimeout t);
+  void Post();
+  void Poke();
+
+  static constexpr char kName[] = "Win32Waiter";
+
+ private:
+  // WinHelper - Used to define utilities for accessing the lock and
+  // condition variable storage once the types are complete.
+  class WinHelper;
+
+  // REQUIRES: WinHelper::GetLock(this) must be held.
+  void InternalCondVarPoke();
+
+  // We can't include Windows.h in our headers, so we use aligned character
+  // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+  // SRW locks and condition variables do not need to be explicitly destroyed.
+  // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+  // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+  alignas(void*) unsigned char mu_storage_[sizeof(void*)];
+  alignas(void*) unsigned char cv_storage_[sizeof(void*)];
+  int waiter_count_;
+  int wakeup_count_;
+};
+
+}  // namespace synchronization_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
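The storage trick in this header (raw alignas(void*) byte buffers in the class, placement-new of the real SRWLOCK/CONDITION_VARIABLE in the .cc, and static_asserts on size, alignment, and trivial destructibility) is a general way to keep <windows.h> out of public headers. A generic sketch of the same technique, with made-up Impl/Handle types standing in for the Windows primitives:

#include <new>
#include <type_traits>

// Stand-in for a platform type the header must not name (e.g. SRWLOCK).
struct Impl {
  void* opaque;
};

class Handle {
 public:
  // The implementation file constructs the real type into the opaque storage.
  Handle() { ::new (static_cast<void*>(&storage_)) Impl{}; }
  // No destructor call: Impl must be trivially destructible for this to be OK.

  Impl* get() { return reinterpret_cast<Impl*>(&storage_); }

 private:
  // The header only commits to "pointer-sized, pointer-aligned bytes".
  alignas(void*) unsigned char storage_[sizeof(void*)];
};

// The checks that make the trick safe, mirroring the ones in win32_waiter.cc.
static_assert(sizeof(Impl) == sizeof(void*), "storage_ is the wrong size");
static_assert(alignof(Impl) == alignof(void*), "storage_ is misaligned");
static_assert(std::is_trivially_destructible<Impl>::value,
              "Impl must be trivially destructible; ~Handle() never runs it");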
diff --git a/abseil-cpp/absl/synchronization/lifetime_test.cc b/abseil-cpp/absl/synchronization/lifetime_test.cc
index cc973a3..d5ce35a 100644
--- a/abseil-cpp/absl/synchronization/lifetime_test.cc
+++ b/abseil-cpp/absl/synchronization/lifetime_test.cc
@@ -18,8 +18,8 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/const_init.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/thread_annotations.h"
+#include "absl/log/check.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/synchronization/notification.h"
 
@@ -35,20 +35,20 @@
 // Thread two waits on 'notification', then sets 'state' inside the 'mutex',
 // signalling the change via 'condvar'.
 //
-// These tests use ABSL_RAW_CHECK to validate invariants, rather than EXPECT or
-// ASSERT from gUnit, because we need to invoke them during global destructors,
-// when gUnit teardown would have already begun.
+// These tests use CHECK to validate invariants, rather than EXPECT or ASSERT
+// from gUnit, because we need to invoke them during global destructors, when
+// gUnit teardown would have already begun.
 void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
                absl::Notification* notification, bool* state) {
   // Test that the notification is in a valid initial state.
-  ABSL_RAW_CHECK(!notification->HasBeenNotified(), "invalid Notification");
-  ABSL_RAW_CHECK(*state == false, "*state not initialized");
+  CHECK(!notification->HasBeenNotified()) << "invalid Notification";
+  CHECK(!*state) << "*state not initialized";
 
   {
     absl::MutexLock lock(mutex);
 
     notification->Notify();
-    ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+    CHECK(notification->HasBeenNotified()) << "invalid Notification";
 
     while (*state == false) {
       condvar->Wait(mutex);
@@ -58,11 +58,11 @@
 
 void ThreadTwo(absl::Mutex* mutex, absl::CondVar* condvar,
                absl::Notification* notification, bool* state) {
-  ABSL_RAW_CHECK(*state == false, "*state not initialized");
+  CHECK(!*state) << "*state not initialized";
 
   // Wake thread one
   notification->WaitForNotification();
-  ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+  CHECK(notification->HasBeenNotified()) << "invalid Notification";
   {
     absl::MutexLock lock(mutex);
     *state = true;
@@ -123,10 +123,10 @@
 };
 
 // These tests require that the compiler correctly supports C++11 constant
-// initialization... but MSVC has a known regression since v19.10:
+// initialization... but MSVC has a known regression from v19.10 until v19.25:
 // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
-// TODO(epastor): Limit the affected range once MSVC fixes this bug.
-#if defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER > 1900)
+#if defined(__clang__) || \
+    !(defined(_MSC_VER) && _MSC_VER > 1900 && _MSC_VER < 1925)
 // kConstInit
 // Test early usage.  (Declaration comes first; definitions must appear after
 // the test runner.)
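The lifetime_test.cc hunks above swap ABSL_RAW_CHECK(cond, "msg") for the streaming CHECK(cond) << ... macro from absl/log/check.h; both abort on failure and remain usable during global destruction, outside gUnit's setup/teardown window. A small sketch of the two styles side by side (the function and values are invented for illustration):

#include "absl/base/internal/raw_logging.h"
#include "absl/log/check.h"

void ValidateState(bool ready, int attempts) {
  // Old style: condition plus a fixed C-string message, no formatting.
  ABSL_RAW_CHECK(ready, "not ready");

  // New style: condition plus an ostream-style message. The message
  // expression is only evaluated when the check fails.
  CHECK(ready) << "not ready after " << attempts << " attempts";
}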
diff --git a/abseil-cpp/absl/synchronization/mutex.cc b/abseil-cpp/absl/synchronization/mutex.cc
index ad13567..3aa5560 100644
--- a/abseil-cpp/absl/synchronization/mutex.cc
+++ b/abseil-cpp/absl/synchronization/mutex.cc
@@ -35,7 +35,9 @@
 
 #include <algorithm>
 #include <atomic>
-#include <cinttypes>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
@@ -51,7 +53,7 @@
 #include "absl/base/internal/sysinfo.h"
 #include "absl/base/internal/thread_identity.h"
 #include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/port.h"
+#include "absl/base/optimization.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/debugging/symbolize.h"
 #include "absl/synchronization/internal/graphcycles.h"
@@ -59,6 +61,7 @@
 #include "absl/time/time.h"
 
 using absl::base_internal::CurrentThreadIdentityIfPresent;
+using absl::base_internal::CycleClock;
 using absl::base_internal::PerThreadSynch;
 using absl::base_internal::SchedulingGuard;
 using absl::base_internal::ThreadIdentity;
@@ -70,7 +73,9 @@
 using absl::synchronization_internal::PerThreadSem;
 
 extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); }
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
+  std::this_thread::yield();
+}
 }  // extern "C"
 
 namespace absl {
@@ -89,70 +94,89 @@
 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
 
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-    absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
-        submit_profile_data;
+absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+    submit_profile_data;
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
-    const char *msg, const void *obj, int64_t wait_cycles)>
+    const char* msg, const void* obj, int64_t wait_cycles)>
     mutex_tracer;
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-    absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
-        cond_var_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
-    bool (*)(const void *pc, char *out, int out_size)>
-    symbolizer(absl::Symbolize);
+absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
+    cond_var_tracer;
 
 }  // namespace
 
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
                                           bool locking, bool trylock,
                                           bool read_lock);
 
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
   submit_profile_data.Store(fn);
 }
 
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
                                     int64_t wait_cycles)) {
   mutex_tracer.Store(fn);
 }
 
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
   cond_var_tracer.Store(fn);
 }
 
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
-  symbolizer.Store(fn);
-}
+namespace {
+// Represents the strategy for spin and yield.
+// See the comment in GetMutexGlobals() for more information.
+enum DelayMode { AGGRESSIVE, GENTLE };
 
 struct ABSL_CACHELINE_ALIGNED MutexGlobals {
   absl::once_flag once;
-  int num_cpus = 0;
   int spinloop_iterations = 0;
+  int32_t mutex_sleep_spins[2] = {};
+  absl::Duration mutex_sleep_time;
 };
 
-static const MutexGlobals& GetMutexGlobals() {
+absl::Duration MeasureTimeToYield() {
+  absl::Time before = absl::Now();
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+  return absl::Now() - before;
+}
+
+const MutexGlobals& GetMutexGlobals() {
   ABSL_CONST_INIT static MutexGlobals data;
   absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
-    data.num_cpus = absl::base_internal::NumCPUs();
-    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+    if (absl::base_internal::NumCPUs() > 1) {
+      // If this is a multiprocessor, allow spinning. If the mode is
+      // aggressive, spin many times before yielding. If the mode is
+      // gentle, spin only a few times before yielding. Aggressive spinning
+      // is used to ensure that an Unlock() call, which must get the spin lock
+      // for any thread to make progress, gets it without undue delay.
+      data.spinloop_iterations = 1500;
+      data.mutex_sleep_spins[AGGRESSIVE] = 5000;
+      data.mutex_sleep_spins[GENTLE] = 250;
+      data.mutex_sleep_time = absl::Microseconds(10);
+    } else {
+      // If this is a uniprocessor, only yield/sleep. Real-time threads are
+      // often unable to yield, so the sleep time needs to be long enough to
+      // keep the calling thread asleep until scheduling happens.
+      data.spinloop_iterations = 0;
+      data.mutex_sleep_spins[AGGRESSIVE] = 0;
+      data.mutex_sleep_spins[GENTLE] = 0;
+      data.mutex_sleep_time = MeasureTimeToYield() * 5;
+      data.mutex_sleep_time =
+          std::min(data.mutex_sleep_time, absl::Milliseconds(1));
+      data.mutex_sleep_time =
+          std::max(data.mutex_sleep_time, absl::Microseconds(10));
+    }
   });
   return data;
 }
-
-// Spinlock delay on iteration c.  Returns new c.
-namespace {
-  enum DelayMode { AGGRESSIVE, GENTLE };
-};
+}  // namespace
 
 namespace synchronization_internal {
+// Returns the Mutex delay on iteration `c` depending on the given `mode`.
+// The returned value should be used as `c` for the next call to `MutexDelay`.
 int MutexDelay(int32_t c, int mode) {
-  // If this a uniprocessor, only yield/sleep.  Otherwise, if the mode is
-  // aggressive then spin many times before yielding.  If the mode is
-  // gentle then spin only a few times before yielding.  Aggressive spinning is
-  // used to ensure that an Unlock() call, which  must get the spin lock for
-  // any thread to make progress gets it without undue delay.
-  const int32_t limit =
-      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
+  const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
+  const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
   if (c < limit) {
     // Spin.
     c++;
@@ -161,11 +185,11 @@
     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
     if (c == limit) {
       // Yield once.
-      AbslInternalMutexYield();
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
       c++;
     } else {
       // Then wait.
-      absl::SleepFor(absl::Microseconds(10));
+      absl::SleepFor(sleep_time);
       c = 0;
     }
     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
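Taken together, the two hunks above make MutexDelay spin while c is below the per-mode limit, yield exactly once when it reaches the limit, and then sleep for the tuned mutex_sleep_time before resetting c. A standalone sketch of that spin / yield / sleep progression, and of how a caller threads the counter back in (simplified; the constants and the BackoffDelay/SpinLock names are invented, not absl's API):

#include <atomic>
#include <chrono>
#include <thread>

// Returns the next value of the caller's counter, as MutexDelay does.
int BackoffDelay(int c, int spin_limit) {
  if (c < spin_limit) {
    return c + 1;  // spin: just keep burning iterations
  }
  if (c == spin_limit) {
    std::this_thread::yield();  // yield exactly once after spinning
    return c + 1;
  }
  // Past the limit: sleep, then start the cycle over.
  std::this_thread::sleep_for(std::chrono::microseconds(10));
  return 0;
}

// Caller side: feed the returned counter into the next call, matching the
// "use the return value as `c` next time" contract documented above.
void SpinLock(std::atomic<bool>& locked) {
  int c = 0;
  while (locked.exchange(true, std::memory_order_acquire)) {
    c = BackoffDelay(c, /*spin_limit=*/250);
  }
}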
@@ -186,8 +210,7 @@
     v = pv->load(std::memory_order_relaxed);
   } while ((v & bits) != bits &&
            ((v & wait_until_clear) != 0 ||
-            !pv->compare_exchange_weak(v, v | bits,
-                                       std::memory_order_release,
+            !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
                                        std::memory_order_relaxed)));
 }
 
@@ -202,8 +225,7 @@
     v = pv->load(std::memory_order_relaxed);
   } while ((v & bits) != 0 &&
            ((v & wait_until_clear) != 0 ||
-            !pv->compare_exchange_weak(v, v & ~bits,
-                                       std::memory_order_release,
+            !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
                                        std::memory_order_relaxed)));
 }
 
@@ -214,7 +236,7 @@
     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
 
 // Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ABSL_CONST_INIT static GraphCycles* deadlock_graph
     ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
 
 //------------------------------------------------------------------
@@ -258,7 +280,7 @@
 // Properties of the events.
 static const struct {
   int flags;
-  const char *msg;
+  const char* msg;
 } event_properties[] = {
     {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
     {0, "TryLock failed "},
@@ -283,12 +305,12 @@
 // Can't be too small, as it's used for deadlock detection information.
 static constexpr uint32_t kNSynchEvent = 1031;
 
-static struct SynchEvent {     // this is a trivial hash table for the events
+static struct SynchEvent {  // this is a trivial hash table for the events
   // struct is freed when refcount reaches 0
   int refcount ABSL_GUARDED_BY(synch_event_mu);
 
   // buckets have linear, 0-terminated  chains
-  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+  SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
 
   // Constant after initialization
   uintptr_t masked_addr;  // object at this address is called "name"
@@ -296,13 +318,13 @@
   // No explicit synchronization used.  Instead we assume that the
   // client who enables/disables invariants/logging on a Mutex does so
   // while the Mutex is not being concurrently accessed by others.
-  void (*invariant)(void *arg);  // called on each event
-  void *arg;            // first arg to (*invariant)()
-  bool log;             // logging turned on
+  void (*invariant)(void* arg);  // called on each event
+  void* arg;                     // first arg to (*invariant)()
+  bool log;                      // logging turned on
 
   // Constant after initialization
-  char name[1];         // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+  char name[1];  // actually longer---NUL-terminated string
+}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
 
 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
 // set "bits" in the word there (waiting until lockbit is clear before doing
@@ -311,11 +333,11 @@
 // the string name is copied into it.
 // When used with a mutex, the caller should also ensure that kMuEvent
 // is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
-                                    const char *name, intptr_t bits,
+static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
+                                    const char* name, intptr_t bits,
                                     intptr_t lockbit) {
-  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
-  SynchEvent *e;
+  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+  SynchEvent* e;
   // first look for existing SynchEvent struct..
   synch_event_mu.Lock();
   for (e = synch_event[h];
@@ -327,9 +349,9 @@
       name = "";
     }
     size_t l = strlen(name);
-    e = reinterpret_cast<SynchEvent *>(
+    e = reinterpret_cast<SynchEvent*>(
         base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
-    e->refcount = 2;    // one for return value, one for linked list
+    e->refcount = 2;  // one for return value, one for linked list
     e->masked_addr = base_internal::HidePtr(addr);
     e->invariant = nullptr;
     e->arg = nullptr;
@@ -339,19 +361,19 @@
     AtomicSetBits(addr, bits, lockbit);
     synch_event[h] = e;
   } else {
-    e->refcount++;      // for return value
+    e->refcount++;  // for return value
   }
   synch_event_mu.Unlock();
   return e;
 }
 
 // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
+static void DeleteSynchEvent(SynchEvent* e) {
   base_internal::LowLevelAlloc::Free(e);
 }
 
 // Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
+static void UnrefSynchEvent(SynchEvent* e) {
   if (e != nullptr) {
     synch_event_mu.Lock();
     bool del = (--(e->refcount) == 0);
@@ -365,11 +387,11 @@
 // Forget the mapping from the object (Mutex or CondVar) at address addr
 // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
 // is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
                              intptr_t lockbit) {
-  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
-  SynchEvent **pe;
-  SynchEvent *e;
+  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+  SynchEvent** pe;
+  SynchEvent* e;
   synch_event_mu.Lock();
   for (pe = &synch_event[h];
        (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -390,9 +412,9 @@
 // Return a refcounted reference to the SynchEvent of the object at address
 // "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
 // called.
-static SynchEvent *GetSynchEvent(const void *addr) {
-  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
-  SynchEvent *e;
+static SynchEvent* GetSynchEvent(const void* addr) {
+  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+  SynchEvent* e;
   synch_event_mu.Lock();
   for (e = synch_event[h];
        e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -407,19 +429,25 @@
 
 // Called when an event "ev" occurs on a Mutex or CondVar "obj"
 // if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
-  SynchEvent *e = GetSynchEvent(obj);
+static void PostSynchEvent(void* obj, int ev) {
+  SynchEvent* e = GetSynchEvent(obj);
   // logging is on if event recording is on and either there's no event struct,
   // or it explicitly says to log
   if (e == nullptr || e->log) {
-    void *pcs[40];
+    void* pcs[40];
     int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
     // A buffer with enough space for the ASCII for all the PCs, even on a
     // 64-bit machine.
     char buffer[ABSL_ARRAYSIZE(pcs) * 24];
-    int pos = snprintf(buffer, sizeof (buffer), " @");
+    int pos = snprintf(buffer, sizeof(buffer), " @");
     for (int i = 0; i != n; i++) {
-      pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
+      int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
+                       " %p", pcs[i]);
+      if (b < 0 ||
+          static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
+        break;
+      }
+      pos += b;
     }
     ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
                  (e == nullptr ? "" : e->name), buffer);
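The PostSynchEvent hunk above hardens the stack-trace formatting loop: each snprintf return value is checked so that a failed or truncated write can never advance pos past the buffer. The same pattern in isolation (a hypothetical helper, not taken from the patch):

#include <cstdio>

// Appends " %p" for `p` at buf[pos]; returns the new position, or -1 if the
// write failed or would not fit, so the caller stops appending.
int AppendPointer(char* buf, int pos, int cap, const void* p) {
  int n = std::snprintf(buf + pos, static_cast<size_t>(cap - pos), " %p", p);
  if (n < 0 || n >= cap - pos) {
    return -1;
  }
  return pos + n;
}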
@@ -433,13 +461,13 @@
     // get false positive race reports later.
     // Reuse EvalConditionAnnotated to properly call into user code.
     struct local {
-      static bool pred(SynchEvent *ev) {
+      static bool pred(SynchEvent* ev) {
         (*ev->invariant)(ev->arg);
         return false;
       }
     };
     Condition cond(&local::pred, e);
-    Mutex *mu = static_cast<Mutex *>(obj);
+    Mutex* mu = static_cast<Mutex*>(obj);
     const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
     const bool trylock = (flags & SYNCH_F_TRY) != 0;
     const bool read_lock = (flags & SYNCH_F_R) != 0;
@@ -465,43 +493,45 @@
 // PerThreadSynch struct points at the most recent SynchWaitParams struct when
 // the thread is on a Mutex's waiter queue.
 struct SynchWaitParams {
-  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
-                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
-                  PerThreadSynch *thread_arg,
-                  std::atomic<intptr_t> *cv_word_arg)
+  SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
+                  KernelTimeout timeout_arg, Mutex* cvmu_arg,
+                  PerThreadSynch* thread_arg,
+                  std::atomic<intptr_t>* cv_word_arg)
       : how(how_arg),
         cond(cond_arg),
         timeout(timeout_arg),
         cvmu(cvmu_arg),
         thread(thread_arg),
         cv_word(cv_word_arg),
-        contention_start_cycles(base_internal::CycleClock::Now()) {}
+        contention_start_cycles(CycleClock::Now()),
+        should_submit_contention_data(false) {}
 
   const Mutex::MuHow how;  // How this thread needs to wait.
-  const Condition *cond;  // The condition that this thread is waiting for.
-                          // In Mutex, this field is set to zero if a timeout
-                          // expires.
+  const Condition* cond;   // The condition that this thread is waiting for.
+                           // In Mutex, this field is set to zero if a timeout
+                           // expires.
   KernelTimeout timeout;  // timeout expiry---absolute time
                           // In Mutex, this field is set to zero if a timeout
                           // expires.
-  Mutex *const cvmu;      // used for transfer from cond var to mutex
-  PerThreadSynch *const thread;  // thread that is waiting
+  Mutex* const cvmu;      // used for transfer from cond var to mutex
+  PerThreadSynch* const thread;  // thread that is waiting
 
   // If not null, thread should be enqueued on the CondVar whose state
   // word is cv_word instead of queueing normally on the Mutex.
-  std::atomic<intptr_t> *cv_word;
+  std::atomic<intptr_t>* cv_word;
 
   int64_t contention_start_cycles;  // Time (in cycles) when this thread started
-                                  // to contend for the mutex.
+                                    // to contend for the mutex.
+  bool should_submit_contention_data;
 };
 
 struct SynchLocksHeld {
-  int n;              // number of valid entries in locks[]
-  bool overflow;      // true iff we overflowed the array at some point
+  int n;          // number of valid entries in locks[]
+  bool overflow;  // true iff we overflowed the array at some point
   struct {
-    Mutex *mu;        // lock acquired
-    int32_t count;      // times acquired
-    GraphId id;       // deadlock_graph id of acquired lock
+    Mutex* mu;      // lock acquired
+    int32_t count;  // times acquired
+    GraphId id;     // deadlock_graph id of acquired lock
   } locks[40];
   // If a thread overfills the array during deadlock detection, we
   // continue, discarding information as needed.  If no overflow has
@@ -511,11 +541,11 @@
 
 // A sentinel value in lists that is not 0.
 // A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
-  reinterpret_cast<PerThreadSynch *>(1);
+static PerThreadSynch* const kPerThreadSynchNull =
+    reinterpret_cast<PerThreadSynch*>(1);
 
-static SynchLocksHeld *LocksHeldAlloc() {
-  SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+static SynchLocksHeld* LocksHeldAlloc() {
+  SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
       base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
   ret->n = 0;
   ret->overflow = false;
@@ -523,24 +553,24 @@
 }
 
 // Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
-  ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+static PerThreadSynch* Synch_GetPerThread() {
+  ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
   return &identity->per_thread_synch;
 }
 
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
   }
-  PerThreadSynch *w = Synch_GetPerThread();
+  PerThreadSynch* w = Synch_GetPerThread();
   if (mu) {
     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
   }
   return w;
 }
 
-static SynchLocksHeld *Synch_GetAllLocks() {
-  PerThreadSynch *s = Synch_GetPerThread();
+static SynchLocksHeld* Synch_GetAllLocks() {
+  PerThreadSynch* s = Synch_GetPerThread();
   if (s->all_locks == nullptr) {
     s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
   }
@@ -548,18 +578,23 @@
 }
 
 // Post on "w"'s associated PerThreadSem.
-inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
-  }
-  PerThreadSem::Post(w->thread_identity());
-  if (mu) {
+    // We miss synchronization around passing PerThreadSynch between threads
+    // since it happens inside of the Mutex code, so we need to ignore all
+    // accesses to the object.
+    ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+    PerThreadSem::Post(w->thread_identity());
+    ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+  } else {
+    PerThreadSem::Post(w->thread_identity());
   }
 }
 
 // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
   }
@@ -580,7 +615,7 @@
 // Mutex code checking that the "waitp" field has not been reused.
 void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
   // Fix the per-thread state only if it exists.
-  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+  ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
   if (identity != nullptr) {
     identity->per_thread_synch.suppress_fatal_errors = true;
   }
@@ -589,21 +624,6 @@
                                  std::memory_order_release);
 }
 
-// --------------------------time support
-
-// Return the current time plus the timeout.  Use the same clock as
-// PerThreadSem::Wait() for consistency.  Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
-  struct timeval tv;
-  gettimeofday(&tv, nullptr);
-  return absl::TimeFromTimeval(tv) + timeout;
-#else
-  return absl::Now() + timeout;
-#endif
-}
-
 // --------------------------Mutexes
 
 // In the layout below, the msb of the bottom byte is currently unused.  Also,
@@ -614,24 +634,29 @@
 //    bit-twiddling trick in Mutex::Unlock().
 //  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
 //    to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader      = 0x0001L;  // a reader holds the lock
-static const intptr_t kMuDesig       = 0x0002L;  // there's a designated waker
-static const intptr_t kMuWait        = 0x0004L;  // threads are waiting
-static const intptr_t kMuWriter      = 0x0008L;  // a writer holds the lock
-static const intptr_t kMuEvent       = 0x0010L;  // record this mutex's events
+static const intptr_t kMuReader = 0x0001L;  // a reader holds the lock
+// There's a designated waker.
 // INVARIANT1:  there's a thread that was blocked on the mutex, is
 // no longer, yet has not yet acquired the mutex.  If there's a
 // designated waker, all threads can avoid taking the slow path in
 // unlock because the designated waker will subsequently acquire
 // the lock and wake someone.  To maintain INVARIANT1 the bit is
 // set when a thread is unblocked(INV1a), and threads that were
-// unblocked reset the bit when they either acquire or re-block
-// (INV1b).
-static const intptr_t kMuWrWait      = 0x0020L;  // runnable writer is waiting
-                                                 // for a reader
-static const intptr_t kMuSpin        = 0x0040L;  // spinlock protects wait list
-static const intptr_t kMuLow         = 0x00ffL;  // mask all mutex bits
-static const intptr_t kMuHigh        = ~kMuLow;  // mask pointer/reader count
+// unblocked reset the bit when they either acquire or re-block (INV1b).
+static const intptr_t kMuDesig = 0x0002L;
+static const intptr_t kMuWait = 0x0004L;    // threads are waiting
+static const intptr_t kMuWriter = 0x0008L;  // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L;   // record this mutex's events
+// Runnable writer is waiting for a reader.
+// If set, new readers will not lock the mutex to avoid writer starvation.
+// Note: if a reader has higher priority than the writer, it will still lock
+// the mutex ahead of the waiting writer, but in a very inefficient manner:
+// the reader will first queue itself and block, but then the last unlocking
+// reader will wake it.
+static const intptr_t kMuWrWait = 0x0020L;
+static const intptr_t kMuSpin = 0x0040L;  // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL;   // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow;  // mask pointer/reader count
 
 // Hack to make constant values available to gdb pretty printer
 enum {
@@ -727,8 +752,8 @@
   ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
 }
 
-void Mutex::EnableDebugLog(const char *name) {
-  SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+void Mutex::EnableDebugLog(const char* name) {
+  SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
   e->log = true;
   UnrefSynchEvent(e);
 }
@@ -737,11 +762,10 @@
   synch_check_invariants.store(enabled, std::memory_order_release);
 }
 
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
-                                     void *arg) {
+void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
   if (synch_check_invariants.load(std::memory_order_acquire) &&
       invariant != nullptr) {
-    SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+    SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
     e->invariant = invariant;
     e->arg = arg;
     UnrefSynchEvent(e);
@@ -752,18 +776,20 @@
   synch_deadlock_detection.store(mode, std::memory_order_release);
 }
 
-// Return true iff threads x and y are waiting on the same condition for the
-// same type of lock.  Requires that x and y be waiting on the same Mutex
-// queue.
-static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
-  return x->waitp->how == y->waitp->how &&
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
+  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
          Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
 }
 
 // Given the contents of a mutex word containing a PerThreadSynch pointer,
 // return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
-  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
+  return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
 }
 
 // The next several routines maintain the per-thread next and skip fields
@@ -775,18 +801,19 @@
 //     - invalid (iff x is not in a Mutex wait queue),
 //     - null, or
 //     - a pointer to a distinct thread waiting later in the same Mutex queue
-//       such that all threads in [x, x->skip] have the same condition and
-//       lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
+//       such that all threads in [x, x->skip] have the same condition, priority
+//       and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+//       x->skip]).
 // In addition, if x->skip is  valid, (x->may_skip || x->skip == null)
 //
-// By the spec of MuSameCondition(), it is not necessary when removing the
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
 // first runnable thread y from the front of a Mutex queue to adjust the skip
 // field of another thread x because if x->skip==y, x->skip must (have) become
 // invalid before y is removed.  The function TryRemove can remove a specified
 // thread from an arbitrary position in the queue whether runnable or not, so
 // it fixes up skip fields that would otherwise be left dangling.
 // The statement
-//     if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
+//     if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
 // maintains the invariant provided x is not the last waiter in a Mutex queue
 // The statement
 //          if (x->skip != null) { x->skip = x->skip->skip; }
@@ -820,17 +847,17 @@
 // except those in the added node and the former "head" node.  This implies
 // that the new node is added after head, and so must be the new head or the
 // new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
-  PerThreadSynch *x0 = nullptr;
-  PerThreadSynch *x1 = x;
-  PerThreadSynch *x2 = x->skip;
+static PerThreadSynch* Skip(PerThreadSynch* x) {
+  PerThreadSynch* x0 = nullptr;
+  PerThreadSynch* x1 = x;
+  PerThreadSynch* x2 = x->skip;
   if (x2 != nullptr) {
     // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
     // such that   x1 == x0->skip && x2 == x1->skip
     while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
-      x0->skip = x2;      // short-circuit skip from x0 to x2
+      x0->skip = x2;  // short-circuit skip from x0 to x2
     }
-    x->skip = x1;         // short-circuit skip from x to result
+    x->skip = x1;  // short-circuit skip from x to result
   }
   return x1;
 }
@@ -839,7 +866,7 @@
 // The latter is going to be removed out of order, because of a timeout.
 // Check whether "ancestor" has a skip field pointing to "to_be_removed",
 // and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
   if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
     if (to_be_removed->skip != nullptr) {
       ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
@@ -851,7 +878,7 @@
   }
 }
 
-static void CondVarEnqueue(SynchWaitParams *waitp);
+static void CondVarEnqueue(SynchWaitParams* waitp);
 
 // Enqueue thread "waitp->thread" on a waiter queue.
 // Called with mutex spinlock held if head != nullptr
@@ -872,8 +899,8 @@
 // returned. This mechanism is used by CondVar to queue a thread on the
 // condition variable queue instead of the mutex queue in implementing Wait().
 // In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
-                               SynchWaitParams *waitp, intptr_t mu, int flags) {
+static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
+                               intptr_t mu, int flags) {
   // If we have been given a cv_word, call CondVarEnqueue() and return
   // the previous head of the Mutex waiter queue.
   if (waitp->cv_word != nullptr) {
@@ -881,72 +908,66 @@
     return head;
   }
 
-  PerThreadSynch *s = waitp->thread;
+  PerThreadSynch* s = waitp->thread;
   ABSL_RAW_CHECK(
       s->waitp == nullptr ||    // normal case
           s->waitp == waitp ||  // Fer()---transfer from condition variable
           s->suppress_fatal_errors,
       "detected illegal recursion into Mutex code");
   s->waitp = waitp;
-  s->skip = nullptr;             // maintain skip invariant (see above)
-  s->may_skip = true;            // always true on entering queue
-  s->wake = false;               // not being woken
+  s->skip = nullptr;   // maintain skip invariant (see above)
+  s->may_skip = true;  // always true on entering queue
+  s->wake = false;     // not being woken
   s->cond_waiter = ((flags & kMuIsCond) != 0);
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+  int64_t now_cycles = CycleClock::Now();
+  if (s->next_priority_read_cycles < now_cycles) {
+    // Every so often, update our idea of the thread's priority.
+    // pthread_getschedparam() is 5% of the block/wakeup time;
+    // CycleClock::Now() is 0.5%.
+    int policy;
+    struct sched_param param;
+    const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+    if (err != 0) {
+      ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+    } else {
+      s->priority = param.sched_priority;
+      s->next_priority_read_cycles =
+          now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+    }
+  }
+#endif
   if (head == nullptr) {         // s is the only waiter
     s->next = s;                 // it's the only entry in the cycle
     s->readers = mu;             // reader count is from mu word
     s->maybe_unlocking = false;  // no one is searching an empty list
     head = s;                    // s is new head
   } else {
-    PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
+    PerThreadSynch* enqueue_after = nullptr;  // we'll put s after this element
 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
-    int64_t now_cycles = base_internal::CycleClock::Now();
-    if (s->next_priority_read_cycles < now_cycles) {
-      // Every so often, update our idea of the thread's priority.
-      // pthread_getschedparam() is 5% of the block/wakeup time;
-      // base_internal::CycleClock::Now() is 0.5%.
-      int policy;
-      struct sched_param param;
-      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
-      if (err != 0) {
-        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
-      } else {
-        s->priority = param.sched_priority;
-        s->next_priority_read_cycles =
-            now_cycles +
-            static_cast<int64_t>(base_internal::CycleClock::Frequency());
-      }
-    }
     if (s->priority > head->priority) {  // s's priority is above head's
       // try to put s in priority-fifo order, or failing that at the front.
       if (!head->maybe_unlocking) {
-        // No unlocker can be scanning the queue, so we can insert between
-        // skip-chains, and within a skip-chain if it has the same condition as
-        // s.  We insert in priority-fifo order, examining the end of every
-        // skip-chain, plus every element with the same condition as s.
-        PerThreadSynch *advance_to = head;    // next value of enqueue_after
-        PerThreadSynch *cur;                  // successor of enqueue_after
+        // No unlocker can be scanning the queue, so we can insert into the
+        // middle of the queue.
+        //
+        // Within a skip chain, all waiters have the same priority, so we can
+        // skip forward through the chains until we find one with a lower
+        // priority than the waiter to be enqueued.
+        PerThreadSynch* advance_to = head;  // next value of enqueue_after
         do {
           enqueue_after = advance_to;
-          cur = enqueue_after->next;  // this advance ensures progress
-          advance_to = Skip(cur);   // normally, advance to end of skip chain
-                                    // (side-effect: optimizes skip chain)
-          if (advance_to != cur && s->priority > advance_to->priority &&
-              MuSameCondition(s, cur)) {
-            // but this skip chain is not a singleton, s has higher priority
-            // than its tail and has the same condition as the chain,
-            // so we can insert within the skip-chain
-            advance_to = cur;         // advance by just one
-          }
+          // (side-effect: optimizes skip chain)
+          advance_to = Skip(enqueue_after->next);
         } while (s->priority <= advance_to->priority);
-              // termination guaranteed because s->priority > head->priority
-              // and head is the end of a skip chain
+        // termination guaranteed because s->priority > head->priority
+        // and head is the end of a skip chain
       } else if (waitp->how == kExclusive &&
                  Condition::GuaranteedEqual(waitp->cond, nullptr)) {
         // An unlocker could be scanning the queue, but we know it will recheck
         // the queue front for writers that have no condition, which is what s
         // is, so an insert at front is safe.
-        enqueue_after = head;       // add after head, at front
+        enqueue_after = head;  // add after head, at front
       }
     }
 #endif
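The Enqueue change above hoists the pthread_getschedparam call out of the insertion branch and caches its result, refreshing it at most about once per second by comparing CycleClock::Now() against the stored next_priority_read_cycles. A rough sketch of that throttled-refresh idea using std::chrono::steady_clock in place of the cycle clock (names invented; the slow query is a stub):

#include <chrono>

class CachedPriority {
 public:
  int Get() {
    const auto now = std::chrono::steady_clock::now();
    if (now >= next_read_) {
      cached_ = ReadPriorityExpensive();           // e.g. a syscall
      next_read_ = now + std::chrono::seconds(1);  // refresh ~once per second
    }
    return cached_;
  }

 private:
  static int ReadPriorityExpensive() { return 0; }  // stand-in for the query
  int cached_ = 0;
  std::chrono::steady_clock::time_point next_read_{};
};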
@@ -956,31 +977,31 @@
 
       // enqueue_after can be: head, Skip(...), or cur.
       // The first two imply enqueue_after->skip == nullptr, and
-      // the last is used only if MuSameCondition(s, cur).
+      // the last is used only if MuEquivalentWaiter(s, cur).
       // We require this because clearing enqueue_after->skip
       // is impossible; enqueue_after's predecessors might also
       // incorrectly skip over s if we were to allow other
       // insertion points.
-      ABSL_RAW_CHECK(
-          enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
-          "Mutex Enqueue failure");
+      ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+                         MuEquivalentWaiter(enqueue_after, s),
+                     "Mutex Enqueue failure");
 
       if (enqueue_after != head && enqueue_after->may_skip &&
-          MuSameCondition(enqueue_after, enqueue_after->next)) {
+          MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
         // enqueue_after can skip to its new successor, s
         enqueue_after->skip = enqueue_after->next;
       }
-      if (MuSameCondition(s, s->next)) {  // s->may_skip is known to be true
-        s->skip = s->next;                // s may skip to its successor
+      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
+        s->skip = s->next;                   // s may skip to its successor
       }
-    } else {   // enqueue not done any other way, so
-               // we're inserting s at the back
+    } else {  // enqueue not done any other way, so
+              // we're inserting s at the back
       // s will become new head; copy data from head into it
-      s->next = head->next;        // add s after head
+      s->next = head->next;  // add s after head
       head->next = s;
       s->readers = head->readers;  // reader count is from previous head
       s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
-      if (head->may_skip && MuSameCondition(head, s)) {
+      if (head->may_skip && MuEquivalentWaiter(head, s)) {
         // head now has successor; may skip
         head->skip = s;
       }
@@ -995,17 +1016,17 @@
 // whose last element is head.  The new head element is returned, or null
 // if the list is made empty.
 // Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
-  PerThreadSynch *w = pw->next;
-  pw->next = w->next;         // snip w out of list
-  if (head == w) {            // we removed the head
+static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
+  PerThreadSynch* w = pw->next;
+  pw->next = w->next;                 // snip w out of list
+  if (head == w) {                    // we removed the head
     head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
-  } else if (pw != head && MuSameCondition(pw, pw->next)) {
+  } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
     // pw can skip to its new successor
     if (pw->next->skip !=
         nullptr) {  // either skip to its successors skip target
       pw->skip = pw->next->skip;
-    } else {                   // or to pw's successor
+    } else {  // or to pw's successor
       pw->skip = pw->next;
     }
   }
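A hedged sketch of the skip-pointer repair Dequeue() performs above: after unlinking pw's successor, pw may adopt the removed node's skip target (or its new successor) so the surrounding skip chain stays intact. Node, Equivalent and RemoveSuccessor are illustrative stand-ins for a non-circular list and do not model the head-removal case handled in the real code.

// Stand-in types; Equivalent() abstracts MuEquivalentWaiter().
struct Node {
  Node* next = nullptr;
  Node* skip = nullptr;
  int klass = 0;  // nodes with equal klass are "equivalent"
};

bool Equivalent(const Node* a, const Node* b) { return a->klass == b->klass; }

// Remove pw->next from the list while keeping pw's skip chain valid.
void RemoveSuccessor(Node* pw) {
  Node* w = pw->next;
  pw->next = w->next;  // snip w out of the list
  if (pw->next != nullptr && Equivalent(pw, pw->next)) {
    // Mirror the two branches in Dequeue(): skip to the removed node's
    // successor's skip target if it has one, otherwise to the successor.
    pw->skip = (pw->next->skip != nullptr) ? pw->next->skip : pw->next;
  }
}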
@@ -1018,27 +1039,27 @@
 // singly-linked list wake_list in the order found.   Assumes that
 // there is only one such element if the element has how == kExclusive.
 // Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
-                                          PerThreadSynch *pw,
-                                          PerThreadSynch **wake_tail) {
-  PerThreadSynch *orig_h = head;
-  PerThreadSynch *w = pw->next;
+static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
+                                          PerThreadSynch* pw,
+                                          PerThreadSynch** wake_tail) {
+  PerThreadSynch* orig_h = head;
+  PerThreadSynch* w = pw->next;
   bool skipped = false;
   do {
-    if (w->wake) {                    // remove this element
+    if (w->wake) {  // remove this element
       ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
       // we're removing pw's successor so either pw->skip is zero or we should
       // already have removed pw since if pw->skip!=null, pw has the same
       // condition as w.
       head = Dequeue(head, pw);
-      w->next = *wake_tail;           // keep list terminated
-      *wake_tail = w;                 // add w to wake_list;
-      wake_tail = &w->next;           // next addition to end
+      w->next = *wake_tail;               // keep list terminated
+      *wake_tail = w;                     // add w to wake_list;
+      wake_tail = &w->next;               // next addition to end
       if (w->waitp->how == kExclusive) {  // wake at most 1 writer
         break;
       }
-    } else {                // not waking this one; skip
-      pw = Skip(w);       // skip as much as possible
+    } else {         // not waking this one; skip
+      pw = Skip(w);  // skip as much as possible
       skipped = true;
     }
     w = pw->next;
@@ -1056,7 +1077,7 @@
 
 // Try to remove thread s from the list of waiters on this mutex.
 // Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
+void Mutex::TryRemove(PerThreadSynch* s) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   // acquire spinlock & lock
@@ -1064,17 +1085,19 @@
       mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
-    PerThreadSynch *h = GetPerThreadSynch(v);
+    PerThreadSynch* h = GetPerThreadSynch(v);
     if (h != nullptr) {
-      PerThreadSynch *pw = h;   // pw is w's predecessor
-      PerThreadSynch *w;
+      PerThreadSynch* pw = h;  // pw is w's predecessor
+      PerThreadSynch* w;
       if ((w = pw->next) != s) {  // search for thread,
         do {                      // processing at least one element
-          if (!MuSameCondition(s, w)) {  // seeking different condition
-            pw = Skip(w);                // so skip all that won't match
+          // If the current element isn't equivalent to the waiter to be
+          // removed, we can skip the entire chain.
+          if (!MuEquivalentWaiter(s, w)) {
+            pw = Skip(w);  // so skip all that won't match
             // we don't have to worry about dangling skip fields
             // in the threads we skipped; none can point to s
-            // because their condition differs from s
+            // because they are in a different equivalence class.
           } else {          // seeking same condition
             FixSkip(w, s);  // fix up any skip pointer from w to s
             pw = w;
@@ -1083,7 +1106,7 @@
           // process the first thread again.
         } while ((w = pw->next) != s && pw != h);
       }
-      if (w == s) {                 // found thread; remove it
+      if (w == s) {  // found thread; remove it
         // pw->skip may be non-zero here; the loop above ensured that
         // no ancestor of s can skip to s, so removal is safe anyway.
         h = Dequeue(h, pw);
@@ -1092,16 +1115,15 @@
       }
     }
     intptr_t nv;
-    do {                        // release spinlock and lock
+    do {  // release spinlock and lock
       v = mu_.load(std::memory_order_relaxed);
       nv = v & (kMuDesig | kMuEvent);
       if (h != nullptr) {
         nv |= kMuWait | reinterpret_cast<intptr_t>(h);
-        h->readers = 0;            // we hold writer lock
+        h->readers = 0;              // we hold writer lock
         h->maybe_unlocking = false;  // finished unlocking
       }
-    } while (!mu_.compare_exchange_weak(v, nv,
-                                        std::memory_order_release,
+    } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
                                         std::memory_order_relaxed));
   }
 }
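The rename from MuSameCondition() to MuEquivalentWaiter() in these hunks reflects that the predicate defines an equivalence class of waiters, not merely equal conditions. A rough sketch of such a predicate over stand-in types; the real test compares PerThreadSynch fields via Condition::GuaranteedEqual() and may include more state than shown here.

// Illustrative stand-ins; not the types or the predicate in mutex.cc.
enum class LockKind { kShared, kExclusive };

struct WaiterSketch {
  LockKind how;      // kind of lock being requested
  int priority;
  const void* cond;  // nullptr means "no condition"
};

// Two waiters are interchangeable for skip-chain purposes if they request the
// same kind of lock, have the same priority, and have provably equal
// conditions (approximated here by pointer equality).
bool EquivalentWaiter(const WaiterSketch& a, const WaiterSketch& b) {
  return a.how == b.how && a.priority == b.priority && a.cond == b.cond;
}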
@@ -1111,7 +1133,7 @@
 // if the wait extends past the absolute time specified, even if "s" is still
 // on the mutex queue.  In this case, remove "s" from the queue and return
 // true, otherwise return false.
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
+void Mutex::Block(PerThreadSynch* s) {
   while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
     if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
       // After a timeout, we go into a spin loop until we remove ourselves
@@ -1130,7 +1152,7 @@
         // is not on the queue.
         this->TryRemove(s);
       }
-      s->waitp->timeout = KernelTimeout::Never();      // timeout is satisfied
+      s->waitp->timeout = KernelTimeout::Never();  // timeout is satisfied
       s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
     }
   }
@@ -1140,8 +1162,8 @@
 }
 
 // Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
-  PerThreadSynch *next = w->next;
+PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
+  PerThreadSynch* next = w->next;
   w->next = nullptr;
   w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
   IncrementSynchSem(this, w);
@@ -1149,7 +1171,7 @@
   return next;
 }
 
-static GraphId GetGraphIdLocked(Mutex *mu)
+static GraphId GetGraphIdLocked(Mutex* mu)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
   if (!deadlock_graph) {  // (re)create the deadlock graph.
     deadlock_graph =
@@ -1159,7 +1181,7 @@
   return deadlock_graph->GetId(mu);
 }
 
-static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
   deadlock_graph_mu.Lock();
   GraphId id = GetGraphIdLocked(mu);
   deadlock_graph_mu.Unlock();
@@ -1169,7 +1191,7 @@
 // Record a lock acquisition.  This is used in debug mode for deadlock
 // detection.  The held_locks pointer points to the relevant data
 // structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
   int n = held_locks->n;
   int i = 0;
   while (i != n && held_locks->locks[i].id != id) {
@@ -1193,7 +1215,7 @@
 // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
 // It does not process the event if is not needed when deadlock detection is
 // disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
   int n = held_locks->n;
   int i = 0;
   while (i != n && held_locks->locks[i].id != id) {
@@ -1208,11 +1230,11 @@
         i++;
       }
       if (i == n) {  // mu missing means releasing unheld lock
-        SynchEvent *mu_events = GetSynchEvent(mu);
+        SynchEvent* mu_events = GetSynchEvent(mu);
         ABSL_RAW_LOG(FATAL,
                      "thread releasing lock it does not hold: %p %s; "
                      ,
-                     static_cast<void *>(mu),
+                     static_cast<void*>(mu),
                      mu_events == nullptr ? "" : mu_events->name);
       }
     }
@@ -1229,7 +1251,7 @@
 }
 
 // Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
+static inline void DebugOnlyLockEnter(Mutex* mu) {
   if (kDebugMode) {
     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
         OnDeadlockCycle::kIgnore) {
@@ -1239,7 +1261,7 @@
 }
 
 // Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
   if (kDebugMode) {
     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
         OnDeadlockCycle::kIgnore) {
@@ -1249,7 +1271,7 @@
 }
 
 // Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
+static inline void DebugOnlyLockLeave(Mutex* mu) {
   if (kDebugMode) {
     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
         OnDeadlockCycle::kIgnore) {
@@ -1258,36 +1280,40 @@
   }
 }
 
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
+static char* StackString(void** pcs, int n, char* buf, int maxlen,
                          bool symbolize) {
-  static const int kSymLen = 200;
+  static constexpr int kSymLen = 200;
   char sym[kSymLen];
   int len = 0;
   for (int i = 0; i != n; i++) {
+    if (len >= maxlen)
+      return buf;
+    size_t count = static_cast<size_t>(maxlen - len);
     if (symbolize) {
-      if (!symbolizer(pcs[i], sym, kSymLen)) {
+      if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
         sym[0] = '\0';
       }
-      snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
-               (i == 0 ? "\n" : ""),
-               pcs[i], sym);
+      snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
+               sym);
     } else {
-      snprintf(buf + len, maxlen - len, " %p", pcs[i]);
+      snprintf(buf + len, count, " %p", pcs[i]);
     }
     len += strlen(&buf[len]);
   }
   return buf;
 }
 
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
-  void *pcs[40];
+static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
+  void* pcs[40];
   return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
                      maxlen, symbolize);
 }
 
 namespace {
-enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle;
-                                    // a path this long would be remarkable
+enum {
+  kMaxDeadlockPathLen = 10
+};  // maximum length of a deadlock cycle;
+    // a path this long would be remarkable
 // Buffers required to report a deadlock.
 // We do not allocate them on stack to avoid large stack frame.
 struct DeadlockReportBuffers {
@@ -1297,11 +1323,11 @@
 
 struct ScopedDeadlockReportBuffers {
   ScopedDeadlockReportBuffers() {
-    b = reinterpret_cast<DeadlockReportBuffers *>(
+    b = reinterpret_cast<DeadlockReportBuffers*>(
         base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
   }
   ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
-  DeadlockReportBuffers *b;
+  DeadlockReportBuffers* b;
 };
 
 // Helper to pass to GraphCycles::UpdateStackTrace.
@@ -1312,13 +1338,13 @@
 
 // Called in debug mode when a thread is about to acquire a lock in a way that
 // may block.
-static GraphId DeadlockCheck(Mutex *mu) {
+static GraphId DeadlockCheck(Mutex* mu) {
   if (synch_deadlock_detection.load(std::memory_order_acquire) ==
       OnDeadlockCycle::kIgnore) {
     return InvalidGraphId();
   }
 
-  SynchLocksHeld *all_locks = Synch_GetAllLocks();
+  SynchLocksHeld* all_locks = Synch_GetAllLocks();
 
   absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
   const GraphId mu_id = GetGraphIdLocked(mu);
@@ -1340,8 +1366,8 @@
   // For each other mutex already held by this thread:
   for (int i = 0; i != all_locks->n; i++) {
     const GraphId other_node_id = all_locks->locks[i].id;
-    const Mutex *other =
-        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+    const Mutex* other =
+        static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
     if (other == nullptr) {
       // Ignore stale lock
       continue;
@@ -1350,46 +1376,51 @@
     // Add the acquired-before edge to the graph.
     if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
       ScopedDeadlockReportBuffers scoped_buffers;
-      DeadlockReportBuffers *b = scoped_buffers.b;
+      DeadlockReportBuffers* b = scoped_buffers.b;
       static int number_of_reported_deadlocks = 0;
       number_of_reported_deadlocks++;
       // Symbolize only 2 first deadlock report to avoid huge slowdowns.
       bool symbolize = number_of_reported_deadlocks <= 2;
       ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
                    CurrentStackString(b->buf, sizeof (b->buf), symbolize));
-      int len = 0;
+      size_t len = 0;
       for (int j = 0; j != all_locks->n; j++) {
         void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
         if (pr != nullptr) {
-          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
-          len += static_cast<int>(strlen(&b->buf[len]));
+          snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
+          len += strlen(&b->buf[len]);
         }
       }
-      ABSL_RAW_LOG(ERROR, "Acquiring %p    Mutexes held: %s",
-                   static_cast<void *>(mu), b->buf);
+      ABSL_RAW_LOG(ERROR,
+                   "Acquiring absl::Mutex %p while holding %s; a cycle in the "
+                   "historical lock ordering graph has been observed",
+                   static_cast<void*>(mu), b->buf);
       ABSL_RAW_LOG(ERROR, "Cycle: ");
-      int path_len = deadlock_graph->FindPath(
-          mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
-      for (int j = 0; j != path_len; j++) {
+      int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
+                                              ABSL_ARRAYSIZE(b->path), b->path);
+      for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
         GraphId id = b->path[j];
-        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+        Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
         if (path_mu == nullptr) continue;
         void** stack;
         int depth = deadlock_graph->GetStackTrace(id, &stack);
         snprintf(b->buf, sizeof(b->buf),
-                 "mutex@%p stack: ", static_cast<void *>(path_mu));
+                 "mutex@%p stack: ", static_cast<void*>(path_mu));
         StackString(stack, depth, b->buf + strlen(b->buf),
                     static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
                     symbolize);
         ABSL_RAW_LOG(ERROR, "%s", b->buf);
       }
+      if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
+        ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+      }
       if (synch_deadlock_detection.load(std::memory_order_acquire) ==
           OnDeadlockCycle::kAbort) {
         deadlock_graph_mu.Unlock();  // avoid deadlock in fatal sighandler
         ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
         return mu_id;
       }
-      break;   // report at most one potential deadlock per acquisition
+      break;  // report at most one potential deadlock per acquisition
     }
   }
 
@@ -1398,7 +1429,7 @@
 
 // Invoke DeadlockCheck() iff we're in debug mode and
 // deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
   if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
                         OnDeadlockCycle::kIgnore) {
     return DeadlockCheck(mu);
@@ -1425,13 +1456,13 @@
       (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
       synch_deadlock_detection.load(std::memory_order_acquire) !=
           OnDeadlockCycle::kIgnore) {
-    GraphId id = GetGraphId(const_cast<Mutex *>(this));
-    SynchLocksHeld *locks = Synch_GetAllLocks();
+    GraphId id = GetGraphId(const_cast<Mutex*>(this));
+    SynchLocksHeld* locks = Synch_GetAllLocks();
     for (int i = 0; i != locks->n; i++) {
       if (locks->locks[i].id == id) {
-        SynchEvent *mu_events = GetSynchEvent(this);
+        SynchEvent* mu_events = GetSynchEvent(this);
         ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
-                     static_cast<const void *>(this),
+                     static_cast<const void*>(this),
                      (mu_events == nullptr ? "" : mu_events->name));
       }
     }
@@ -1444,8 +1475,8 @@
   int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
-    if ((v & (kMuReader|kMuEvent)) != 0) {
-      return false;  // a reader or tracing -> give up
+    if ((v & (kMuReader | kMuEvent)) != 0) {
+      return false;                       // a reader or tracing -> give up
     } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
                mu->compare_exchange_strong(v, kMuWriter | v,
                                            std::memory_order_acquire,
@@ -1456,14 +1487,13 @@
   return false;
 }
 
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
+void Mutex::Lock() {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   GraphId id = DebugOnlyDeadlockCheck(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
   // try fast acquire, then spin loop
   if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
-      !mu_.compare_exchange_strong(v, kMuWriter | v,
-                                   std::memory_order_acquire,
+      !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
     // try spin acquire, then slow loop
     if (!TryAcquireWithSpinning(&this->mu_)) {
@@ -1474,7 +1504,7 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
 }
 
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
+void Mutex::ReaderLock() {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1489,7 +1519,7 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
 }
 
-void Mutex::LockWhen(const Condition &cond) {
+void Mutex::LockWhen(const Condition& cond) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   GraphId id = DebugOnlyDeadlockCheck(this);
   this->LockSlow(kExclusive, &cond, 0);
@@ -1497,21 +1527,26 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
 }
 
-bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
-  return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   GraphId id = DebugOnlyDeadlockCheck(this);
-  bool res = LockSlowWithDeadline(kExclusive, &cond,
-                                  KernelTimeout(deadline), 0);
+  bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
   DebugOnlyLockEnter(this, id);
   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
   return res;
 }
 
-void Mutex::ReaderLockWhen(const Condition &cond) {
+bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
+  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+  GraphId id = DebugOnlyDeadlockCheck(this);
+  bool res =
+      LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
+  DebugOnlyLockEnter(this, id);
+  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  return res;
+}
+
+void Mutex::ReaderLockWhen(const Condition& cond) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
   this->LockSlow(kShared, &cond, 0);
@@ -1519,12 +1554,17 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
 }
 
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
                                       absl::Duration timeout) {
-  return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+  GraphId id = DebugOnlyDeadlockCheck(this);
+  bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+  DebugOnlyLockEnter(this, id);
+  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+  return res;
 }
 
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
                                        absl::Time deadline) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1534,23 +1574,34 @@
   return res;
 }
 
-void Mutex::Await(const Condition &cond) {
-  if (cond.Eval()) {    // condition already true; nothing to do
+void Mutex::Await(const Condition& cond) {
+  if (cond.Eval()) {  // condition already true; nothing to do
     if (kDebugMode) {
       this->AssertReaderHeld();
     }
-  } else {              // normal case
+  } else {  // normal case
     ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
                    "condition untrue on return from Await");
   }
 }
 
-bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
-  return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
+  if (cond.Eval()) {  // condition already true; nothing to do
+    if (kDebugMode) {
+      this->AssertReaderHeld();
+    }
+    return true;
+  }
+
+  KernelTimeout t{timeout};
+  bool res = this->AwaitCommon(cond, t);
+  ABSL_RAW_CHECK(res || t.has_timeout(),
+                 "condition untrue on return from Await");
+  return res;
 }
 
-bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
-  if (cond.Eval()) {      // condition already true; nothing to do
+bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+  if (cond.Eval()) {  // condition already true; nothing to do
     if (kDebugMode) {
       this->AssertReaderHeld();
     }
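Above, AwaitWithTimeout() (and the LockWhenWithTimeout/ReaderLockWhenWithTimeout overloads earlier in the diff) now build a KernelTimeout directly from the relative Duration instead of first converting it to an absolute deadline. One plausible reading is that relative waits can then stay on a monotonic clock. The std::chrono fragment below only illustrates that general "wait for" versus "wait until" distinction; it is not the KernelTimeout implementation.

#include <chrono>
#include <condition_variable>
#include <mutex>

// Waiting "for" a duration uses the steady (monotonic) clock, so a wall-clock
// jump cannot stretch or shrink the wait; waiting "until" a system_clock time
// is tied to the wall clock by design.
bool WaitForVsUntil(std::condition_variable& cv,
                    std::unique_lock<std::mutex>& lk, bool use_relative) {
  using namespace std::chrono;
  if (use_relative) {
    return cv.wait_for(lk, milliseconds(100)) == std::cv_status::no_timeout;
  }
  return cv.wait_until(lk, system_clock::now() + milliseconds(100)) ==
         std::cv_status::no_timeout;
}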
@@ -1564,14 +1615,14 @@
   return res;
 }
 
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
   this->AssertReaderHeld();
   MuHow how =
       (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
-  SynchWaitParams waitp(
-      how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
-      nullptr /*no cv_word*/);
+  SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
+                        Synch_GetPerThreadAnnotated(this),
+                        nullptr /*no cv_word*/);
   int flags = kMuHasBlocked;
   if (!Condition::GuaranteedEqual(&cond, nullptr)) {
     flags |= kMuIsCond;
@@ -1587,18 +1638,17 @@
   return res;
 }
 
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
+bool Mutex::TryLock() {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
   intptr_t v = mu_.load(std::memory_order_relaxed);
   if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
-      mu_.compare_exchange_strong(v, kMuWriter | v,
-                                  std::memory_order_acquire,
+      mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
     DebugOnlyLockEnter(this);
     ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
     return true;
   }
-  if ((v & kMuEvent) != 0) {              // we're recording events
+  if ((v & kMuEvent) != 0) {                      // we're recording events
     if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
         mu_.compare_exchange_strong(
             v, (kExclusive->fast_or | v) + kExclusive->fast_add,
@@ -1616,7 +1666,7 @@
   return false;
 }
 
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
+bool Mutex::ReaderTryLock() {
   ABSL_TSAN_MUTEX_PRE_LOCK(this,
                            __tsan_mutex_read_lock | __tsan_mutex_try_lock);
   intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1624,7 +1674,7 @@
   // changing (typically because the reader count changes) under the CAS.  We
   // limit the number of attempts to avoid having to think about livelock.
   int loop_limit = 5;
-  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+  while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
     if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
@@ -1636,7 +1686,7 @@
     loop_limit--;
     v = mu_.load(std::memory_order_relaxed);
   }
-  if ((v & kMuEvent) != 0) {   // we're recording events
+  if ((v & kMuEvent) != 0) {  // we're recording events
     loop_limit = 5;
     while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
       if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
@@ -1662,7 +1712,7 @@
   return false;
 }
 
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
+void Mutex::Unlock() {
   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
   DebugOnlyLockLeave(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1675,7 +1725,7 @@
   // should_try_cas is whether we'll try a compare-and-swap immediately.
   // NOTE: optimized out when kDebugMode is false.
   bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
-                          (v & (kMuWait | kMuDesig)) != kMuWait);
+                         (v & (kMuWait | kMuDesig)) != kMuWait);
   // But, we can use an alternate computation of it, that compilers
   // currently don't find on their own.  When that changes, this function
   // can be simplified.
@@ -1692,10 +1742,9 @@
                  static_cast<long long>(v), static_cast<long long>(x),
                  static_cast<long long>(y));
   }
-  if (x < y &&
-      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
-                                  std::memory_order_release,
-                                  std::memory_order_relaxed)) {
+  if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+                                           std::memory_order_release,
+                                           std::memory_order_relaxed)) {
     // fast writer release (writer with no waiters or with designated waker)
   } else {
     this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
@@ -1705,7 +1754,7 @@
 
 // Requires v to represent a reader-locked state.
 static bool ExactlyOneReader(intptr_t v) {
-  assert((v & (kMuWriter|kMuReader)) == kMuReader);
+  assert((v & (kMuWriter | kMuReader)) == kMuReader);
   assert((v & kMuHigh) != 0);
   // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
   // on some architectures the following generates slightly smaller code.
@@ -1714,16 +1763,15 @@
   return (v & kMuMultipleWaitersMask) == 0;
 }
 
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
+void Mutex::ReaderUnlock() {
   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
   DebugOnlyLockLeave(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
-  assert((v & (kMuWriter|kMuReader)) == kMuReader);
-  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+  assert((v & (kMuWriter | kMuReader)) == kMuReader);
+  if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
     // fast reader release (reader with no waiters)
-    intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
-    if (mu_.compare_exchange_strong(v, v - clear,
-                                    std::memory_order_release,
+    intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+    if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
                                     std::memory_order_relaxed)) {
       ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
       return;
@@ -1733,26 +1781,36 @@
   ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
 }
 
-// The zap_desig_waker bitmask is used to clear the designated waker flag in
-// the mutex if this thread has blocked, and therefore may be the designated
-// waker.
-static const intptr_t zap_desig_waker[] = {
-    ~static_cast<intptr_t>(0),  // not blocked
-    ~static_cast<intptr_t>(
-        kMuDesig)  // blocked; turn off the designated waker bit
-};
+// Clears the designated waker flag in the mutex if this thread has blocked, and
+// therefore may be the designated waker.
+static intptr_t ClearDesignatedWakerMask(int flag) {
+  assert(flag >= 0);
+  assert(flag <= 1);
+  switch (flag) {
+    case 0:  // not blocked
+      return ~static_cast<intptr_t>(0);
+    case 1:  // blocked; turn off the designated waker bit
+      return ~static_cast<intptr_t>(kMuDesig);
+  }
+  ABSL_UNREACHABLE();
+}
 
-// The ignore_waiting_writers bitmask is used to ignore the existence
-// of waiting writers if a reader that has already blocked once
-// wakes up.
-static const intptr_t ignore_waiting_writers[] = {
-    ~static_cast<intptr_t>(0),  // not blocked
-    ~static_cast<intptr_t>(
-        kMuWrWait)  // blocked; pretend there are no waiting writers
-};
+// Conditionally ignores the existence of waiting writers if a reader that has
+// already blocked once wakes up.
+static intptr_t IgnoreWaitingWritersMask(int flag) {
+  assert(flag >= 0);
+  assert(flag <= 1);
+  switch (flag) {
+    case 0:  // not blocked
+      return ~static_cast<intptr_t>(0);
+    case 1:  // blocked; pretend there are no waiting writers
+      return ~static_cast<intptr_t>(kMuWrWait);
+  }
+  ABSL_UNREACHABLE();
+}
 
 // Internal version of LockWhen().  See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
                                              int flags) {
   ABSL_RAW_CHECK(
       this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
@@ -1760,7 +1818,7 @@
 }
 
 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
                                           bool locking, bool trylock,
                                           bool read_lock) {
   // Delicate annotation dance.
@@ -1769,8 +1827,8 @@
   // operation tsan considers that we've already released the mutex.
   bool res = false;
 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
-  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
+  const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
+  const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
 #endif
   if (locking) {
     // For lock we pretend that we have finished the operation,
@@ -1810,7 +1868,7 @@
 // tsan). As the result there is no tsan-visible synchronization between the
 // addition and this thread. So if we would enable race detection here,
 // it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
   // Memory accesses are already ignored inside of lock/unlock operations,
   // but synchronization operations are also ignored. When we evaluate the
   // predicate we must ignore only memory accesses but not synchronization,
@@ -1835,14 +1893,16 @@
 //   obstruct this call
 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
 //   Await,  LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
                                  KernelTimeout t, int flags) {
   intptr_t v = mu_.load(std::memory_order_relaxed);
   bool unlock = false;
   if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
       mu_.compare_exchange_strong(
-          v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
-                 how->fast_add,
+          v,
+          (how->fast_or |
+           (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+              how->fast_add,
           std::memory_order_acquire, std::memory_order_relaxed)) {
     if (cond == nullptr ||
         EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
@@ -1850,9 +1910,9 @@
     }
     unlock = true;
   }
-  SynchWaitParams waitp(
-      how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
-      nullptr /*no cv_word*/);
+  SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
+                        Synch_GetPerThreadAnnotated(this),
+                        nullptr /*no cv_word*/);
   if (!Condition::GuaranteedEqual(cond, nullptr)) {
     flags |= kMuIsCond;
   }
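The fast-acquire path above ANDs the mutex word with ClearDesignatedWakerMask(flags & kMuHasBlocked), which replaces the old zap_desig_waker[] array: argument 0 yields an all-ones mask (a no-op AND) and argument 1 clears the designated-waker bit, with the argument range now asserted rather than implied by the array bounds. A small self-contained sketch of the same pattern; kDesigBit and ClearDesigMask are made-up stand-ins, not the real kMuDesig constant or function.

#include <cassert>
#include <cstdint>

// Hypothetical flag bit standing in for kMuDesig.
constexpr std::intptr_t kDesigBit = 0x2;

// flag == 0 (caller never blocked): all-ones mask, so ANDing changes nothing.
// flag == 1 (caller has blocked): ~kDesigBit, so ANDing clears the bit.
std::intptr_t ClearDesigMask(int flag) {
  assert(flag == 0 || flag == 1);
  return flag == 0 ? ~static_cast<std::intptr_t>(0)
                   : ~static_cast<std::intptr_t>(kDesigBit);
}

int main() {
  std::intptr_t v = 0x7;                   // a word with the waker bit set
  assert((v & ClearDesigMask(0)) == 0x7);  // not blocked: word unchanged
  assert((v & ClearDesigMask(1)) == 0x5);  // blocked: bit 0x2 cleared
  return 0;
}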
@@ -1881,7 +1941,7 @@
   // Test for either of two situations that should not occur in v:
   //   kMuWriter and kMuReader
   //   kMuWrWait and !kMuWait
-  const uintptr_t w = v ^ kMuWait;
+  const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
   // By flipping that bit, we can now test for:
   //   kMuWriter and kMuReader in w
   //   kMuWrWait and kMuWait in w
@@ -1893,20 +1953,20 @@
   if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
   RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
                 "%s: Mutex corrupt: both reader and writer lock held: %p",
-                label, reinterpret_cast<void *>(v));
+                label, reinterpret_cast<void*>(v));
   RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
-                "%s: Mutex corrupt: waiting writer with no waiters: %p",
-                label, reinterpret_cast<void *>(v));
+                "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
+                reinterpret_cast<void*>(v));
   assert(false);
 }
 
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   int c = 0;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this,
-         waitp->how == kExclusive?  SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+    PostSynchEvent(
+        this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
   }
   ABSL_RAW_CHECK(
       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -1916,9 +1976,10 @@
     CheckForMutexCorruption(v, "Lock");
     if ((v & waitp->how->slow_need_zero) == 0) {
       if (mu_.compare_exchange_strong(
-              v, (waitp->how->fast_or |
-                  (v & zap_desig_waker[flags & kMuHasBlocked])) +
-                     waitp->how->fast_add,
+              v,
+              (waitp->how->fast_or |
+               (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+                  waitp->how->fast_add,
               std::memory_order_acquire, std::memory_order_relaxed)) {
         if (waitp->cond == nullptr ||
             EvalConditionAnnotated(waitp->cond, this, true, false,
@@ -1930,13 +1991,14 @@
         flags |= kMuHasBlocked;
         c = 0;
       }
-    } else {                      // need to access waiter list
+    } else {  // need to access waiter list
       bool dowait = false;
-      if ((v & (kMuSpin|kMuWait)) == 0) {   // no waiters
+      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
         // This thread tries to become the one and only waiter.
-        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
-        intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
-                      kMuWait;
+        PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
+        intptr_t nv =
+            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
+            kMuWait;
         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
         if (waitp->how == kExclusive && (v & kMuReader) != 0) {
           nv |= kMuWrWait;
@@ -1945,21 +2007,22 @@
                 v, reinterpret_cast<intptr_t>(new_h) | nv,
                 std::memory_order_release, std::memory_order_relaxed)) {
           dowait = true;
-        } else {            // attempted Enqueue() failed
+        } else {  // attempted Enqueue() failed
           // zero out the waitp field set by Enqueue()
           waitp->thread->waitp = nullptr;
         }
       } else if ((v & waitp->how->slow_inc_need_zero &
-                  ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+                  IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
         // This is a reader that needs to increment the reader count,
         // but the count is currently held in the last waiter.
         if (mu_.compare_exchange_strong(
-                v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
-                       kMuReader,
+                v,
+                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+                    kMuSpin | kMuReader,
                 std::memory_order_acquire, std::memory_order_relaxed)) {
-          PerThreadSynch *h = GetPerThreadSynch(v);
-          h->readers += kMuOne;       // inc reader count in waiter
-          do {                        // release spinlock
+          PerThreadSynch* h = GetPerThreadSynch(v);
+          h->readers += kMuOne;  // inc reader count in waiter
+          do {                   // release spinlock
             v = mu_.load(std::memory_order_relaxed);
           } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
                                               std::memory_order_release,
@@ -1969,28 +2032,30 @@
                                      waitp->how == kShared)) {
             break;  // we timed out, or condition true, so return
           }
-          this->UnlockSlow(waitp);           // got lock but condition false
+          this->UnlockSlow(waitp);  // got lock but condition false
           this->Block(waitp->thread);
           flags |= kMuHasBlocked;
           c = 0;
         }
       } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
                  mu_.compare_exchange_strong(
-                     v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
-                            kMuWait,
+                     v,
+                     (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+                         kMuSpin | kMuWait,
                      std::memory_order_acquire, std::memory_order_relaxed)) {
-        PerThreadSynch *h = GetPerThreadSynch(v);
-        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+        PerThreadSynch* h = GetPerThreadSynch(v);
+        PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
         intptr_t wr_wait = 0;
         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
         if (waitp->how == kExclusive && (v & kMuReader) != 0) {
-          wr_wait = kMuWrWait;      // give priority to a waiting writer
+          wr_wait = kMuWrWait;  // give priority to a waiting writer
         }
-        do {                        // release spinlock
+        do {  // release spinlock
           v = mu_.load(std::memory_order_relaxed);
         } while (!mu_.compare_exchange_weak(
-            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
-            reinterpret_cast<intptr_t>(new_h),
+            v,
+            (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+                reinterpret_cast<intptr_t>(new_h),
             std::memory_order_release, std::memory_order_relaxed));
         dowait = true;
       }
@@ -2010,9 +2075,9 @@
       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
       "detected illegal recursion into Mutex code");
   if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this,
-                   waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
-                                      SYNCH_EV_READERLOCK_RETURNING);
+    PostSynchEvent(this, waitp->how == kExclusive
+                             ? SYNCH_EV_LOCK_RETURNING
+                             : SYNCH_EV_READERLOCK_RETURNING);
   }
 }
 
@@ -2021,28 +2086,28 @@
 // which holds the lock but is not runnable because its condition is false
 // or it is in the process of blocking on a condition variable; it must requeue
 // itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   this->AssertReaderHeld();
   CheckForMutexCorruption(v, "Unlock");
   if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this,
-                (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+    PostSynchEvent(
+        this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
   }
   int c = 0;
   // the waiter under consideration to wake, or zero
-  PerThreadSynch *w = nullptr;
+  PerThreadSynch* w = nullptr;
   // the predecessor to w or zero
-  PerThreadSynch *pw = nullptr;
+  PerThreadSynch* pw = nullptr;
   // head of the list searched previously, or zero
-  PerThreadSynch *old_h = nullptr;
+  PerThreadSynch* old_h = nullptr;
   // a condition that's known to be false.
-  const Condition *known_false = nullptr;
-  PerThreadSynch *wake_list = kPerThreadSynchNull;   // list of threads to wake
-  intptr_t wr_wait = 0;        // set to kMuWrWait if we wake a reader and a
-                               // later writer could have acquired the lock
-                               // (starvation avoidance)
+  const Condition* known_false = nullptr;
+  PerThreadSynch* wake_list = kPerThreadSynchNull;  // list of threads to wake
+  intptr_t wr_wait = 0;  // set to kMuWrWait if we wake a reader and a
+                         // later writer could have acquired the lock
+                         // (starvation avoidance)
   ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
                      waitp->thread->suppress_fatal_errors,
                  "detected illegal recursion into Mutex code");
@@ -2062,8 +2127,7 @@
     } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
       // fast reader release (reader with no waiters)
       intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
-      if (mu_.compare_exchange_strong(v, v - clear,
-                                      std::memory_order_release,
+      if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
                                       std::memory_order_relaxed)) {
         return;
       }
@@ -2071,16 +2135,16 @@
                mu_.compare_exchange_strong(v, v | kMuSpin,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
-      if ((v & kMuWait) == 0) {       // no one to wake
+      if ((v & kMuWait) == 0) {  // no one to wake
         intptr_t nv;
         bool do_enqueue = true;  // always Enqueue() the first time
         ABSL_RAW_CHECK(waitp != nullptr,
                        "UnlockSlow is confused");  // about to sleep
-        do {    // must loop to release spinlock as reader count may change
+        do {  // must loop to release spinlock as reader count may change
           v = mu_.load(std::memory_order_relaxed);
           // decrement reader count if there are readers
-          intptr_t new_readers = (v >= kMuOne)?  v - kMuOne : v;
-          PerThreadSynch *new_h = nullptr;
+          intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
+          PerThreadSynch* new_h = nullptr;
           if (do_enqueue) {
             // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
             // we must not retry here.  The initial attempt will always have
@@ -2104,21 +2168,20 @@
           }
           // release spinlock & our lock; retry if reader-count changed
           // (writer count cannot change since we hold lock)
-        } while (!mu_.compare_exchange_weak(v, nv,
-                                            std::memory_order_release,
+        } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
                                             std::memory_order_relaxed));
         break;
       }
 
       // There are waiters.
       // Set h to the head of the circular waiter list.
-      PerThreadSynch *h = GetPerThreadSynch(v);
+      PerThreadSynch* h = GetPerThreadSynch(v);
       if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
         // a reader but not the last
-        h->readers -= kMuOne;  // release our lock
-        intptr_t nv = v;       // normally just release spinlock
+        h->readers -= kMuOne;    // release our lock
+        intptr_t nv = v;         // normally just release spinlock
         if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves
-          PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+          PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
           ABSL_RAW_CHECK(new_h != nullptr,
                          "waiters disappeared during Enqueue()!");
           nv &= kMuLow;
@@ -2136,17 +2199,17 @@
 
       // The lock is becoming free, and there's a waiter
       if (old_h != nullptr &&
-          !old_h->may_skip) {                  // we used old_h as a terminator
-        old_h->may_skip = true;                // allow old_h to skip once more
+          !old_h->may_skip) {    // we used old_h as a terminator
+        old_h->may_skip = true;  // allow old_h to skip once more
         ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
-        if (h != old_h && MuSameCondition(old_h, old_h->next)) {
+        if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
           old_h->skip = old_h->next;  // old_h not head & can skip to successor
         }
       }
       if (h->next->waitp->how == kExclusive &&
           Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
         // easy case: writer with no condition; no need to search
-        pw = h;                       // wake w, the successor of h (=pw)
+        pw = h;  // wake w, the successor of h (=pw)
         w = h->next;
         w->wake = true;
         // We are waking up a writer.  This writer may be racing against
@@ -2169,13 +2232,13 @@
         // waiter has a condition or is a reader.  We avoid searching over
         // waiters we've searched on previous iterations by starting at
         // old_h if it's set.  If old_h==h, there's no one to wakeup at all.
-        if (old_h == h) {      // we've searched before, and nothing's new
-                               // so there's no one to wake.
-          intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+        if (old_h == h) {  // we've searched before, and nothing's new
+                           // so there's no one to wake.
+          intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
           h->readers = 0;
-          h->maybe_unlocking = false;   // finished unlocking
-          if (waitp != nullptr) {       // we must queue ourselves and sleep
-            PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+          h->maybe_unlocking = false;  // finished unlocking
+          if (waitp != nullptr) {      // we must queue ourselves and sleep
+            PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
             nv &= kMuLow;
             if (new_h != nullptr) {
               nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
@@ -2189,12 +2252,12 @@
         }
 
         // set up to walk the list
-        PerThreadSynch *w_walk;   // current waiter during list walk
-        PerThreadSynch *pw_walk;  // previous waiter during list walk
+        PerThreadSynch* w_walk;   // current waiter during list walk
+        PerThreadSynch* pw_walk;  // previous waiter during list walk
         if (old_h != nullptr) {  // we've searched up to old_h before
           pw_walk = old_h;
           w_walk = old_h->next;
-        } else {            // no prior search, start at beginning
+        } else {  // no prior search, start at beginning
           pw_walk =
               nullptr;  // h->next's predecessor may change; don't record it
           w_walk = h->next;
@@ -2220,7 +2283,7 @@
         // to walk the path from w_walk to h inclusive. (TryRemove() can remove
         // a waiter anywhere, but it acquires both the spinlock and the Mutex)
 
-        old_h = h;        // remember we searched to here
+        old_h = h;  // remember we searched to here
 
         // Walk the path upto and including h looking for waiters we can wake.
         while (pw_walk != h) {
@@ -2232,24 +2295,24 @@
                //  is in fact true
                EvalConditionIgnored(this, w_walk->waitp->cond))) {
             if (w == nullptr) {
-              w_walk->wake = true;    // can wake this waiter
+              w_walk->wake = true;  // can wake this waiter
               w = w_walk;
               pw = pw_walk;
               if (w_walk->waitp->how == kExclusive) {
                 wr_wait = kMuWrWait;
-                break;                // bail if waking this writer
+                break;  // bail if waking this writer
               }
             } else if (w_walk->waitp->how == kShared) {  // wake if a reader
               w_walk->wake = true;
-            } else {   // writer with true condition
+            } else {  // writer with true condition
               wr_wait = kMuWrWait;
             }
-          } else {                  // can't wake; condition false
+          } else {                              // can't wake; condition false
             known_false = w_walk->waitp->cond;  // remember last false condition
           }
-          if (w_walk->wake) {   // we're waking reader w_walk
-            pw_walk = w_walk;   // don't skip similar waiters
-          } else {              // not waking; skip as much as possible
+          if (w_walk->wake) {  // we're waking reader w_walk
+            pw_walk = w_walk;  // don't skip similar waiters
+          } else {             // not waking; skip as much as possible
             pw_walk = Skip(w_walk);
           }
           // If pw_walk == h, then load of pw_walk->next can race with
@@ -2276,8 +2339,8 @@
       h = DequeueAllWakeable(h, pw, &wake_list);
 
       intptr_t nv = (v & kMuEvent) | kMuDesig;
-                                             // assume no waiters left,
-                                             // set kMuDesig for INV1a
+      // assume no waiters left,
+      // set kMuDesig for INV1a
 
       if (waitp != nullptr) {  // we must queue ourselves and sleep
         h = Enqueue(h, waitp, v, kMuIsCond);
@@ -2290,7 +2353,7 @@
 
       if (h != nullptr) {  // there are waiters left
         h->readers = 0;
-        h->maybe_unlocking = false;     // finished unlocking
+        h->maybe_unlocking = false;  // finished unlocking
         nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
       }
 
@@ -2301,21 +2364,29 @@
     }
     // aggressive here; no one can proceed till we do
     c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
-  }                            // end of for(;;)-loop
+  }  // end of for(;;)-loop
 
   if (wake_list != kPerThreadSynchNull) {
-    int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
-    bool cond_waiter = wake_list->cond_waiter;
+    int64_t total_wait_cycles = 0;
+    int64_t max_wait_cycles = 0;
+    int64_t now = CycleClock::Now();
     do {
-      wake_list = Wakeup(wake_list);              // wake waiters
+      // Profile lock contention events only if the waiter was trying to acquire
+      // the lock, not waiting on a condition variable or Condition.
+      if (!wake_list->cond_waiter) {
+        int64_t cycles_waited =
+            (now - wake_list->waitp->contention_start_cycles);
+        total_wait_cycles += cycles_waited;
+        if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
+        wake_list->waitp->contention_start_cycles = now;
+        wake_list->waitp->should_submit_contention_data = true;
+      }
+      wake_list = Wakeup(wake_list);  // wake waiters
     } while (wake_list != kPerThreadSynchNull);
-    if (!cond_waiter) {
-      // Sample lock contention events only if the (first) waiter was trying to
-      // acquire the lock, not waiting on a condition variable or Condition.
-      int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp;
-      mutex_tracer("slow release", this, wait_cycles);
+    if (total_wait_cycles > 0) {
+      mutex_tracer("slow release", this, total_wait_cycles);
       ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
-      submit_profile_data(enqueue_timestamp);
+      submit_profile_data(total_wait_cycles);
       ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
     }
   }
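The hunk above switches contention profiling from sampling only the first woken waiter to summing the wait cycles of every woken waiter that was actually blocked on the lock, skipping CondVar/Condition waiters. A hedged sketch of that aggregation over a stand-in wake list; WokenWaiter and TotalContentionCycles are illustrative names, not the PerThreadSynch walk in UnlockSlow().

#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for a woken waiter; the real code walks a PerThreadSynch list.
struct WokenWaiter {
  bool cond_waiter;  // waiting on a CondVar/Condition, not on the lock itself
  std::int64_t contention_start_cycles;  // when it began contending
};

std::int64_t TotalContentionCycles(const std::vector<WokenWaiter>& wake_list,
                                   std::int64_t now) {
  std::int64_t total = 0;
  for (const WokenWaiter& w : wake_list) {
    if (!w.cond_waiter) {  // only lock acquirers count as contention
      total += now - w.contention_start_cycles;
    }
  }
  return total;  // reported once per slow release
}

int main() {
  std::vector<WokenWaiter> wake_list = {{false, 100}, {true, 50}, {false, 120}};
  std::printf("%lld\n",
              static_cast<long long>(TotalContentionCycles(wake_list, 200)));
  // prints 180: (200-100) + (200-120); the CondVar waiter is ignored
}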
@@ -2338,7 +2409,7 @@
 // condition variable.  If this mutex is free, we simply wake the thread.
 // It will later acquire the mutex with high probability.  Otherwise, we
 // enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
+void Mutex::Fer(PerThreadSynch* w) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   int c = 0;
   ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2363,9 +2434,9 @@
       IncrementSynchSem(this, w);
       return;
     } else {
-      if ((v & (kMuSpin|kMuWait)) == 0) {       // no waiters
+      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
         // This thread tries to become the one and only waiter.
-        PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+        PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
         ABSL_RAW_CHECK(new_h != nullptr,
                        "Enqueue failed");  // we must queue ourselves
         if (mu_.compare_exchange_strong(
@@ -2375,8 +2446,8 @@
         }
       } else if ((v & kMuSpin) == 0 &&
                  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
-        PerThreadSynch *h = GetPerThreadSynch(v);
-        PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+        PerThreadSynch* h = GetPerThreadSynch(v);
+        PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
         ABSL_RAW_CHECK(new_h != nullptr,
                        "Enqueue failed");  // we must queue ourselves
         do {
@@ -2395,19 +2466,18 @@
 
 void Mutex::AssertHeld() const {
   if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
-    SynchEvent *e = GetSynchEvent(this);
+    SynchEvent* e = GetSynchEvent(this);
     ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
-                 static_cast<const void *>(this),
-                 (e == nullptr ? "" : e->name));
+                 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
   }
 }
 
 void Mutex::AssertReaderHeld() const {
   if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
-    SynchEvent *e = GetSynchEvent(this);
-    ABSL_RAW_LOG(
-        FATAL, "thread should hold at least a read lock on Mutex %p %s",
-        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+    SynchEvent* e = GetSynchEvent(this);
+    ABSL_RAW_LOG(FATAL,
+                 "thread should hold at least a read lock on Mutex %p %s",
+                 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
   }
 }
 
@@ -2418,13 +2488,17 @@
 static const intptr_t kCvLow = 0x0003L;  // low order bits of CV
 
 // Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+enum {
+  kGdbCvSpin = kCvSpin,
+  kGdbCvEvent = kCvEvent,
+  kGdbCvLow = kCvLow,
+};
 
 static_assert(PerThreadSynch::kAlignment > kCvLow,
               "PerThreadSynch::kAlignment must be greater than kCvLow");
 
-void CondVar::EnableDebugLog(const char *name) {
-  SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+void CondVar::EnableDebugLog(const char* name) {
+  SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
   e->log = true;
   UnrefSynchEvent(e);
 }
@@ -2435,25 +2509,23 @@
   }
 }
 
-
 // Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
+void CondVar::Remove(PerThreadSynch* s) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v;
   int c = 0;
   for (v = cv_.load(std::memory_order_relaxed);;
        v = cv_.load(std::memory_order_relaxed)) {
     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
-        cv_.compare_exchange_strong(v, v | kCvSpin,
-                                    std::memory_order_acquire,
+        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
       if (h != nullptr) {
-        PerThreadSynch *w = h;
+        PerThreadSynch* w = h;
         while (w->next != s && w->next != h) {  // search for thread
           w = w->next;
         }
-        if (w->next == s) {           // found thread; remove it
+        if (w->next == s) {  // found thread; remove it
           w->next = s->next;
           if (h == s) {
             h = (w == s) ? nullptr : w;
@@ -2462,7 +2534,7 @@
           s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
         }
       }
-                                      // release spinlock
+      // release spinlock
       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                 std::memory_order_release);
       return;
@@ -2480,19 +2552,19 @@
 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
 // the logging code, or via a Condition function) and might potentially attempt
 // to block this thread.  That would be a problem if the thread were already on
-// a the condition variable waiter queue.  Thus, we use the waitp->cv_word
-// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
-// condition variable queue just before the mutex is to be unlocked, and (most
+// a condition variable waiter queue.  Thus, we use the waitp->cv_word to tell
+// the unlock code to call CondVarEnqueue() to queue the thread on the condition
+// variable queue just before the mutex is to be unlocked, and (most
 // importantly) after any call to an external routine that might re-enter the
 // mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
+static void CondVarEnqueue(SynchWaitParams* waitp) {
   // This thread might be transferred to the Mutex queue by Fer() when
   // we are woken.  To make sure that is what happens, Enqueue() doesn't
   // call CondVarEnqueue() again but instead uses its normal code.  We
   // must do this before we queue ourselves so that cv_word will be null
   // when seen by the dequeuer, who may wish immediately to requeue
   // this thread on another queue.
-  std::atomic<intptr_t> *cv_word = waitp->cv_word;
+  std::atomic<intptr_t>* cv_word = waitp->cv_word;
   waitp->cv_word = nullptr;
 
   intptr_t v = cv_word->load(std::memory_order_relaxed);
@@ -2505,8 +2577,8 @@
     v = cv_word->load(std::memory_order_relaxed);
   }
   ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
-  waitp->thread->waitp = waitp;      // prepare ourselves for waiting
-  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+  waitp->thread->waitp = waitp;  // prepare ourselves for waiting
+  PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
   if (h == nullptr) {  // add this thread to waiter list
     waitp->thread->next = waitp->thread;
   } else {
@@ -2519,8 +2591,8 @@
                  std::memory_order_release);
 }
 
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
-  bool rc = false;          // return value; true iff we timed-out
+bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
+  bool rc = false;  // return value; true iff we timed-out
 
   intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
   Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
@@ -2545,6 +2617,23 @@
   while (waitp.thread->state.load(std::memory_order_acquire) ==
          PerThreadSynch::kQueued) {
     if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
+      // DecrementSynchSem returned due to timeout.
+      // Now we will either (1) remove ourselves from the wait list in Remove
+      // below, in which case Remove will set thread.state = kAvailable and
+      // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
+      // has removed us concurrently and is calling Wakeup, which will set
+      // thread.state = kAvailable and post to the semaphore.
+      // It's important to reset the timeout for the case (2) because otherwise
+      // we can live-lock in this loop since DecrementSynchSem will always
+      // return immediately due to timeout, but Signal/SignalAll is not
+      // necessary set thread.state = kAvailable yet (and is not scheduled
+      // due to thread priorities or other scheduler artifacts).
+      // Note this could also be resolved if Signal/SignalAll would set
+      // thread.state = kAvailable while holding the wait list spin lock.
+      // But this can't be easily done for SignalAll since it grabs the whole
+      // wait list with a single compare-exchange and does not really grab
+      // the spin lock.
+      t = KernelTimeout::Never();
       this->Remove(waitp.thread);
       rc = true;
     }
@@ -2570,27 +2659,25 @@
   return rc;
 }
 
-bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
-  return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
+  return WaitCommon(mu, KernelTimeout(timeout));
 }
 
-bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
   return WaitCommon(mu, KernelTimeout(deadline));
 }
 
-void CondVar::Wait(Mutex *mu) {
-  WaitCommon(mu, KernelTimeout::Never());
-}
+void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
 
 // Wake thread w
 // If it was a timed wait, w will be waiting on w->cv
 // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
 // Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
+void CondVar::Wakeup(PerThreadSynch* w) {
   if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
     // The waiting thread only needs to observe "w->state == kAvailable" to be
     // released, we must cache "cvmu" before clearing "next".
-    Mutex *mu = w->waitp->cvmu;
+    Mutex* mu = w->waitp->cvmu;
     w->next = nullptr;
     w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
     Mutex::IncrementSynchSem(mu, w);
@@ -2607,11 +2694,10 @@
   for (v = cv_.load(std::memory_order_relaxed); v != 0;
        v = cv_.load(std::memory_order_relaxed)) {
     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
-        cv_.compare_exchange_strong(v, v | kCvSpin,
-                                    std::memory_order_acquire,
+        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
-      PerThreadSynch *w = nullptr;
+      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
+      PerThreadSynch* w = nullptr;
       if (h != nullptr) {  // remove first waiter
         w = h->next;
         if (w == h) {
@@ -2620,11 +2706,11 @@
           h->next = w->next;
         }
       }
-                                      // release spinlock
+      // release spinlock
       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                 std::memory_order_release);
       if (w != nullptr) {
-        CondVar::Wakeup(w);                // wake waiter, if there was one
+        CondVar::Wakeup(w);  // wake waiter, if there was one
         cond_var_tracer("Signal wakeup", this);
       }
       if ((v & kCvEvent) != 0) {
@@ -2639,7 +2725,7 @@
   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
 }
 
-void CondVar::SignalAll () {
+void CondVar::SignalAll() {
   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
   intptr_t v;
   int c = 0;
@@ -2653,11 +2739,11 @@
     if ((v & kCvSpin) == 0 &&
         cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
       if (h != nullptr) {
-        PerThreadSynch *w;
-        PerThreadSynch *n = h->next;
-        do {                          // for every thread, wake it up
+        PerThreadSynch* w;
+        PerThreadSynch* n = h->next;
+        do {  // for every thread, wake it up
           w = n;
           n = n->next;
           CondVar::Wakeup(w);
@@ -2685,54 +2771,60 @@
 }
 
 #ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
+extern "C" void __tsan_read1(void* addr);
 #else
 #define __tsan_read1(addr)  // do nothing if TSan not enabled
 #endif
 
 // A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
+static bool Dereference(void* arg) {
   // ThreadSanitizer does not instrument this file for memory accesses.
   // This function dereferences a user variable that can participate
   // in a data race, so we need to manually tell TSan about this memory access.
   __tsan_read1(arg);
-  return *(static_cast<bool *>(arg));
+  return *(static_cast<bool*>(arg));
 }
 
-Condition::Condition() {}   // null constructor, used for kTrue only
-const Condition Condition::kTrue;
+ABSL_CONST_INIT const Condition Condition::kTrue;
 
-Condition::Condition(bool (*func)(void *), void *arg)
-    : eval_(&CallVoidPtrFunction),
-      function_(func),
-      method_(nullptr),
-      arg_(arg) {}
-
-bool Condition::CallVoidPtrFunction(const Condition *c) {
-  return (*c->function_)(c->arg_);
+Condition::Condition(bool (*func)(void*), void* arg)
+    : eval_(&CallVoidPtrFunction), arg_(arg) {
+  static_assert(sizeof(&func) <= sizeof(callback_),
+                "An overlarge function pointer passed to Condition.");
+  StoreCallback(func);
 }
 
-Condition::Condition(const bool *cond)
+bool Condition::CallVoidPtrFunction(const Condition* c) {
+  using FunctionPointer = bool (*)(void*);
+  FunctionPointer function_pointer;
+  std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
+  return (*function_pointer)(c->arg_);
+}
+
+Condition::Condition(const bool* cond)
     : eval_(CallVoidPtrFunction),
-      function_(Dereference),
-      method_(nullptr),
       // const_cast is safe since Dereference does not modify arg
-      arg_(const_cast<bool *>(cond)) {}
+      arg_(const_cast<bool*>(cond)) {
+  using FunctionPointer = bool (*)(void*);
+  const FunctionPointer dereference = Dereference;
+  StoreCallback(dereference);
+}
 
 bool Condition::Eval() const {
   // eval_ == null for kTrue
   return (this->eval_ == nullptr) || (*this->eval_)(this);
 }
 
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
-  if (a == nullptr) {
+bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
+  // kTrue logic.
+  if (a == nullptr || a->eval_ == nullptr) {
     return b == nullptr || b->eval_ == nullptr;
+  } else if (b == nullptr || b->eval_ == nullptr) {
+    return false;
   }
-  if (b == nullptr || b->eval_ == nullptr) {
-    return a->eval_ == nullptr;
-  }
-  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
-         a->arg_ == b->arg_ && a->method_ == b->method_;
+  // Check equality of the representative fields.
+  return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
+         !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
 }
 
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/synchronization/mutex.h b/abseil-cpp/absl/synchronization/mutex.h
index 52401fe..645c26d 100644
--- a/abseil-cpp/absl/synchronization/mutex.h
+++ b/abseil-cpp/absl/synchronization/mutex.h
@@ -31,22 +31,23 @@
 //
 //  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
 //              write access within the current scope.
+//
 //  ReaderMutexLock
 //            - An RAII wrapper to acquire and release a `Mutex` for shared/read
 //              access within the current scope.
 //
 //  WriterMutexLock
-//            - Alias for `MutexLock` above, designed for use in distinguishing
-//              reader and writer locks within code.
+//            - Effectively an alias for `MutexLock` above, designed for use in
+//              distinguishing reader and writer locks within code.
 //
 // In addition to simple mutex locks, this file also defines ways to perform
 // locking under certain conditions.
 //
-//  Condition   - (Preferred) Used to wait for a particular predicate that
-//                depends on state protected by the `Mutex` to become true.
-//  CondVar     - A lower-level variant of `Condition` that relies on
-//                application code to explicitly signal the `CondVar` when
-//                a condition has been met.
+//  Condition - (Preferred) Used to wait for a particular predicate that
+//              depends on state protected by the `Mutex` to become true.
+//  CondVar   - A lower-level variant of `Condition` that relies on
+//              application code to explicitly signal the `CondVar` when
+//              a condition has been met.
 //
 // See below for more information on using `Condition` or `CondVar`.
 //
@@ -59,6 +60,8 @@
 
 #include <atomic>
 #include <cstdint>
+#include <cstring>
+#include <iterator>
 #include <string>
 
 #include "absl/base/const_init.h"
@@ -72,15 +75,6 @@
 #include "absl/synchronization/internal/per_thread_sem.h"
 #include "absl/time/time.h"
 
-// Decide if we should use the non-production implementation because
-// the production implementation hasn't been fully ported yet.
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
-#error ABSL_INTERNAL_USE_NONPROD_MUTEX cannot be directly set
-#elif defined(ABSL_LOW_LEVEL_ALLOC_MISSING)
-#define ABSL_INTERNAL_USE_NONPROD_MUTEX 1
-#include "absl/synchronization/internal/mutex_nonprod.inc"
-#endif
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
@@ -98,26 +92,42 @@
 //
 // A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
 // The `Lock()` operation *acquires* a `Mutex` (in a state known as an
-// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
 // Mutex. During the span of time between the Lock() and Unlock() operations,
-// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// a mutex is said to be *held*. By design, all mutexes support exclusive/write
 // locks, as this is the most common way to use a mutex.
 //
+// Mutex operations are only allowed under certain conditions; otherwise an
+// operation is "invalid", and disallowed by the API. The conditions concern
+// both the current state of the mutex and the identity of the threads that
+// are performing the operations.
+//
 // The `Mutex` state machine for basic lock/unlock operations is quite simple:
 //
-// |                | Lock()     | Unlock() |
-// |----------------+------------+----------|
-// | Free           | Exclusive  | invalid  |
-// | Exclusive      | blocks     | Free     |
+// |                | Lock()                 | Unlock() |
+// |----------------+------------------------+----------|
+// | Free           | Exclusive              | invalid  |
+// | Exclusive      | blocks, then exclusive | Free     |
 //
-// Attempts to `Unlock()` must originate from the thread that performed the
-// corresponding `Lock()` operation.
+// The full conditions are as follows.
 //
-// An "invalid" operation is disallowed by the API. The `Mutex` implementation
-// is allowed to do anything on an invalid call, including but not limited to
+// * Calls to `Unlock()` require that the mutex be held, and must be made in the
+//   same thread that performed the corresponding `Lock()` operation which
+//   acquired the mutex; otherwise the call is invalid.
+//
+// * The mutex being non-reentrant (or non-recursive) means that a call to
+//   `Lock()` or `TryLock()` must not be made in a thread that already holds the
+//   mutex; such a call is invalid.
+//
+// * In other words, the state of being "held" has both a temporal component
+//   (from `Lock()` until `Unlock()`) and a thread identity component:
+//   the mutex is held *by a particular thread*.
+//
+// An "invalid" operation has undefined behavior. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including, but not limited to,
 // crashing with a useful error message, silently succeeding, or corrupting
-// data structures. In debug mode, the implementation attempts to crash with a
-// useful error message.
+// data structures. In debug mode, the implementation may crash with a useful
+// error message.
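+//
+// A minimal sketch of the valid pattern (illustrative only):
+//
+//   absl::Mutex mu;
+//   mu.Lock();      // `mu` is now held exclusively by this thread
+//   // ... read or write state guarded by `mu` ...
+//   mu.Unlock();    // must be called on the same thread that called `Lock()`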
 //
 // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
 // is, however, approximately fair over long periods, and starvation-free for
@@ -131,8 +141,9 @@
 // issues that could potentially result in race conditions and deadlocks.
 //
 // For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
+// [Thread Safety
+// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
+// documentation.
 //
 // See also `MutexLock`, below, for scoped `Mutex` acquisition.
 
@@ -155,7 +166,7 @@
   //
   // Example usage:
   //   namespace foo {
-  //   ABSL_CONST_INIT Mutex mu(absl::kConstInit);
+  //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
   //   }
   explicit constexpr Mutex(absl::ConstInitType);
 
@@ -170,7 +181,7 @@
   // Mutex::Unlock()
   //
   // Releases this `Mutex` and returns it from the exclusive/write state to the
-  // free state. Caller must hold the `Mutex` exclusively.
+  // free state. Calling thread must hold the `Mutex` exclusively.
   void Unlock() ABSL_UNLOCK_FUNCTION();
 
   // Mutex::TryLock()
@@ -182,9 +193,12 @@
 
   // Mutex::AssertHeld()
   //
-  // Return immediately if this thread holds the `Mutex` exclusively (in write
-  // mode). Otherwise, may report an error (typically by crashing with a
-  // diagnostic), or may return immediately.
+  // Require that the mutex be held exclusively (write mode) by this thread.
+  //
+  // If the mutex is not currently held by this thread, this function may report
+  // an error (typically by crashing with a diagnostic) or it may do nothing.
+  // This function is intended only as a tool to assist debugging; it doesn't
+  // guarantee correctness.
   void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
 
   // ---------------------------------------------------------------------------
@@ -244,9 +258,13 @@
 
   // Mutex::AssertReaderHeld()
   //
-  // Returns immediately if this thread holds the `Mutex` in at least shared
-  // mode (read mode). Otherwise, may report an error (typically by
-  // crashing with a diagnostic), or may return immediately.
+  // Require that the mutex be held at least in shared mode (read mode) by this
+  // thread.
+  //
+  // If the mutex is not currently held by this thread, this function may report
+  // an error (typically by crashing with a diagnostic) or it may do nothing.
+  // This function is intended only as a tool to assist debugging; it doesn't
+  // guarantee correctness.
   void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
 
   // Mutex::WriterLock()
@@ -256,7 +274,7 @@
   // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
   //
   // These methods may be used (along with the complementary `Reader*()`
-  // methods) to distingish simple exclusive `Mutex` usage (`Lock()`,
+  // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
   // etc.) from reader/writer lock usage.
   void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
 
@@ -306,7 +324,7 @@
   // `true`, `Await()` *may* skip the release/re-acquire step.
   //
   // `Await()` requires that this thread holds this `Mutex` in some mode.
-  void Await(const Condition &cond);
+  void Await(const Condition& cond);
 
   // Mutex::LockWhen()
   // Mutex::ReaderLockWhen()
@@ -316,11 +334,11 @@
   // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
   // logically equivalent to `*Lock(); Await();` though they may have different
   // performance characteristics.
-  void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
+  void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
 
-  void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
+  void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION();
 
-  void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+  void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     this->LockWhen(cond);
   }
 
@@ -345,9 +363,9 @@
   // Negative timeouts are equivalent to a zero timeout.
   //
   // This method requires that this thread holds this `Mutex` in some mode.
-  bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
+  bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout);
 
-  bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
+  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
 
   // Mutex::LockWhenWithTimeout()
   // Mutex::ReaderLockWhenWithTimeout()
@@ -360,11 +378,11 @@
   // `true` on return.
   //
   // Negative timeouts are equivalent to a zero timeout.
-  bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+  bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
       ABSL_EXCLUSIVE_LOCK_FUNCTION();
-  bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+  bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
       ABSL_SHARED_LOCK_FUNCTION();
-  bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+  bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     return this->LockWhenWithTimeout(cond, timeout);
   }
@@ -380,11 +398,11 @@
   // on return.
   //
   // Deadlines in the past are equivalent to an immediate deadline.
-  bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+  bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
       ABSL_EXCLUSIVE_LOCK_FUNCTION();
-  bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+  bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
       ABSL_SHARED_LOCK_FUNCTION();
-  bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+  bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     return this->LockWhenWithDeadline(cond, deadline);
   }
@@ -406,7 +424,7 @@
   // substantially reduce `Mutex` performance; it should be set only for
   // non-production runs.  Optimization options may also disable invariant
   // checks.
-  void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+  void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
 
   // Mutex::EnableDebugLog()
   //
@@ -415,7 +433,7 @@
   // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
   //
   // Note: This method substantially reduces `Mutex` performance.
-  void EnableDebugLog(const char *name);
+  void EnableDebugLog(const char* name);
 
   // Deadlock detection
 
@@ -443,7 +461,7 @@
 
   // A `MuHow` is a constant that indicates how a lock should be acquired.
   // Internal implementation detail.  Clients should ignore.
-  typedef const struct MuHowS *MuHow;
+  typedef const struct MuHowS* MuHow;
 
   // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
   //
@@ -461,53 +479,41 @@
   static void InternalAttemptToUseMutexInFatalSignalHandler();
 
  private:
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
-  friend class CondVar;
-
-  synchronization_internal::MutexImpl *impl() { return impl_.get(); }
-
-  synchronization_internal::SynchronizationStorage<
-      synchronization_internal::MutexImpl>
-      impl_;
-#else
   std::atomic<intptr_t> mu_;  // The Mutex state.
 
   // Post()/Wait() versus associated PerThreadSem; in class for required
   // friendship with PerThreadSem.
-  static inline void IncrementSynchSem(Mutex *mu,
-                                       base_internal::PerThreadSynch *w);
-  static inline bool DecrementSynchSem(
-      Mutex *mu, base_internal::PerThreadSynch *w,
-      synchronization_internal::KernelTimeout t);
+  static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
+  static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
+                                synchronization_internal::KernelTimeout t);
 
   // slow path acquire
-  void LockSlowLoop(SynchWaitParams *waitp, int flags);
+  void LockSlowLoop(SynchWaitParams* waitp, int flags);
   // wrappers around LockSlowLoop()
-  bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+  bool LockSlowWithDeadline(MuHow how, const Condition* cond,
                             synchronization_internal::KernelTimeout t,
                             int flags);
-  void LockSlow(MuHow how, const Condition *cond,
+  void LockSlow(MuHow how, const Condition* cond,
                 int flags) ABSL_ATTRIBUTE_COLD;
   // slow path release
-  void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+  void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
   // Common code between Await() and AwaitWithTimeout/Deadline()
-  bool AwaitCommon(const Condition &cond,
+  bool AwaitCommon(const Condition& cond,
                    synchronization_internal::KernelTimeout t);
   // Attempt to remove thread s from queue.
-  void TryRemove(base_internal::PerThreadSynch *s);
+  void TryRemove(base_internal::PerThreadSynch* s);
   // Block a thread on mutex.
-  void Block(base_internal::PerThreadSynch *s);
+  void Block(base_internal::PerThreadSynch* s);
   // Wake a thread; return successor.
-  base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+  base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
 
   friend class CondVar;   // for access to Trans()/Fer().
   void Trans(MuHow how);  // used for CondVar->Mutex transfer
   void Fer(
-      base_internal::PerThreadSynch *w);  // used for CondVar->Mutex transfer
-#endif
+      base_internal::PerThreadSynch* w);  // used for CondVar->Mutex transfer
 
   // Catch the error of writing Mutex when intending MutexLock.
-  Mutex(const volatile Mutex * /*ignored*/) {}  // NOLINT(runtime/explicit)
+  explicit Mutex(const volatile Mutex* /*ignored*/) {}
 
   Mutex(const Mutex&) = delete;
   Mutex& operator=(const Mutex&) = delete;
@@ -525,31 +531,45 @@
 // Example:
 //
 // Class Foo {
-//
+//  public:
 //   Foo::Bar* Baz() {
-//     MutexLock l(&lock_);
+//     MutexLock lock(&mu_);
 //     ...
 //     return bar;
 //   }
 //
 // private:
-//   Mutex lock_;
+//   Mutex mu_;
 // };
 class ABSL_SCOPED_LOCKABLE MutexLock {
  public:
-  explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+  // Constructors
+
+  // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
+  // guaranteed to be locked when this object is constructed. Requires that
+  // `mu` be dereferenceable.
+  explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
     this->mu_->Lock();
   }
 
-  MutexLock(const MutexLock &) = delete;  // NOLINT(runtime/mutex)
-  MutexLock(MutexLock&&) = delete;  // NOLINT(runtime/mutex)
+  // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
+  // the above, the condition given by `cond` is also guaranteed to hold when
+  // this object is constructed.
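+  //
+  // A brief illustration (assuming a member `mu_` and a `Condition` named
+  // `cond` exist in the surrounding code):
+  //
+  //   {
+  //     MutexLock lock(&mu_, cond);
+  //     // `mu_` is held here and `cond` was true when it was acquired.
+  //   }  // `mu_` is released; `cond` may no longer hold.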
+  explicit MutexLock(Mutex* mu, const Condition& cond)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(mu) {
+    this->mu_->LockWhen(cond);
+  }
+
+  MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
+  MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)
   MutexLock& operator=(const MutexLock&) = delete;
   MutexLock& operator=(MutexLock&&) = delete;
 
   ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
 };
 
 // ReaderMutexLock
@@ -558,10 +578,16 @@
 // releases a shared lock on a `Mutex` via RAII.
 class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
  public:
-  explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+  explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
     mu->ReaderLock();
   }
 
+  explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
+      ABSL_SHARED_LOCK_FUNCTION(mu)
+      : mu_(mu) {
+    mu->ReaderLockWhen(cond);
+  }
+
   ReaderMutexLock(const ReaderMutexLock&) = delete;
   ReaderMutexLock(ReaderMutexLock&&) = delete;
   ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
@@ -570,7 +596,7 @@
   ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
 };
 
 // WriterMutexLock
@@ -579,11 +605,17 @@
 // releases a write (exclusive) lock on a `Mutex` via RAII.
 class ABSL_SCOPED_LOCKABLE WriterMutexLock {
  public:
-  explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     mu->WriterLock();
   }
 
+  explicit WriterMutexLock(Mutex* mu, const Condition& cond)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(mu) {
+    mu->WriterLockWhen(cond);
+  }
+
   WriterMutexLock(const WriterMutexLock&) = delete;
   WriterMutexLock(WriterMutexLock&&) = delete;
   WriterMutexLock& operator=(const WriterMutexLock&) = delete;
@@ -592,19 +624,19 @@
   ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
 };
 
 // -----------------------------------------------------------------------------
 // Condition
 // -----------------------------------------------------------------------------
 //
-// As noted above, `Mutex` contains a number of member functions which take a
-// `Condition` as an argument; clients can wait for conditions to become `true`
-// before attempting to acquire the mutex. These sections are known as
-// "condition critical" sections. To use a `Condition`, you simply need to
-// construct it, and use within an appropriate `Mutex` member function;
-// everything else in the `Condition` class is an implementation detail.
+// `Mutex` contains a number of member functions which take a `Condition` as an
+// argument; clients can wait for conditions to become `true` before attempting
+// to acquire the mutex. These sections are known as "condition critical"
+// sections. To use a `Condition`, you simply need to construct it and use it
+// within an appropriate `Mutex` member function; everything else in the
+// `Condition` class is an implementation detail.
 //
 // A `Condition` is specified as a function pointer which returns a boolean.
 // `Condition` functions should be pure functions -- their results should depend
@@ -622,16 +654,26 @@
 // `noexcept`; until then this requirement cannot be enforced in the
 // type system.)
 //
-// Note: to use a `Condition`, you need only construct it and pass it within the
-// appropriate `Mutex' member function, such as `Mutex::Await()`.
+// Note: to use a `Condition`, you need only construct it and pass it to a
+// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
+// constructor of one of the scope guard classes.
 //
-// Example:
+// Example using LockWhen/Unlock:
 //
 //   // assume count_ is not internal reference count
 //   int count_ ABSL_GUARDED_BY(mu_);
+//   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
 //
-//   mu_.LockWhen(Condition(+[](int* count) { return *count == 0; },
-//         &count_));
+//   mu_.LockWhen(count_is_zero);
+//   // ...
+//   mu_.Unlock();
+//
+// Example using a scope guard:
+//
+//   {
+//     MutexLock lock(&mu_, count_is_zero);
+//     // ...
+//   }
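+//
+// Example using Await (illustrative, reusing `count_is_zero` from above):
+//
+//   mu_.Lock();
+//   // ... mutate count_ ...
+//   mu_.Await(count_is_zero);   // may release and reacquire `mu_` internally
+//   // count_ == 0 here
+//   mu_.Unlock();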
 //
 // When multiple threads are waiting on exactly the same condition, make sure
 // that they are constructed with the same parameters (same pointer to function
@@ -640,7 +682,7 @@
 class Condition {
  public:
   // A Condition that returns the result of "(*func)(arg)"
-  Condition(bool (*func)(void *), void *arg);
+  Condition(bool (*func)(void*), void* arg);
 
   // Templated version for people who are averse to casts.
   //
@@ -651,8 +693,22 @@
   // Note: lambdas in this case must contain no bound variables.
   //
   // See class comment for performance advice.
-  template<typename T>
-  Condition(bool (*func)(T *), T *arg);
+  template <typename T>
+  Condition(bool (*func)(T*), T* arg);
+
+  // Same as above, but allows for cases where `arg` comes from a pointer that
+  // is convertible to the function parameter type `T*` but not an exact match.
+  //
+  // For example, the argument might be `X*` but the function takes `const X*`,
+  // or the argument might be `Derived*` while the function takes `Base*`, and
+  // so on for cases where the argument pointer can be implicitly converted.
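+  //
+  // A small illustration (the `Fetcher` type and `IsIdle` function are
+  // hypothetical, not part of this header):
+  //
+  //   bool IsIdle(const Fetcher* f);   // note: takes `const Fetcher*`
+  //   Fetcher* fetcher = ...;
+  //   // `Fetcher*` converts implicitly to `const Fetcher*`:
+  //   mu_.Await(absl::Condition(IsIdle, fetcher));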
+  //
+  // Implementation notes: This constructor overload is required in addition to
+  // the one above to allow deduction of `T` from `arg` for cases such as where
+  // a function template is passed as `func`. Also, the dummy `typename = void`
+  // template parameter exists just to work around a MSVC mangling bug.
+  template <typename T, typename = void>
+  Condition(bool (*func)(T*), typename absl::internal::identity<T>::type* arg);
 
   // Templated version for invoking a method that returns a `bool`.
   //
@@ -662,16 +718,16 @@
   // Implementation Note: `absl::internal::identity` is used to allow methods to
   // come from base classes. A simpler signature like
   // `Condition(T*, bool (T::*)())` does not suffice.
-  template<typename T>
-  Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
+  template <typename T>
+  Condition(T* object, bool (absl::internal::identity<T>::type::*method)());
 
   // Same as above, for const members
-  template<typename T>
-  Condition(const T *object,
-            bool (absl::internal::identity<T>::type::* method)() const);
+  template <typename T>
+  Condition(const T* object,
+            bool (absl::internal::identity<T>::type::*method)() const);
 
   // A Condition that returns the value of `*cond`
-  explicit Condition(const bool *cond);
+  explicit Condition(const bool* cond);
 
   // Templated version for invoking a functor that returns a `bool`.
   // This approach accepts pointers to non-mutable lambdas, `std::function`,
@@ -686,10 +742,10 @@
   //   };
   //   mu_.Await(Condition(&reached));
   //
-  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReadHeld()" in the
-  // lambda as it may be called when the mutex is being unlocked from a scope
-  // holding only a reader lock, which will make the assertion not fulfilled and
-  // crash the binary.
+  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
+  // the lambda as it may be called when the mutex is being unlocked from a
+  // scope holding only a reader lock, which will make the assertion fail and
+  // crash the binary.
 
   // See class comment for performance advice. In particular, if there
   // might be more than one waiter for the same condition, make sure
@@ -698,13 +754,23 @@
   // Implementation note: The second template parameter ensures that this
   // constructor doesn't participate in overload resolution if T doesn't have
   // `bool operator() const`.
-  template <typename T, typename E = decltype(
-      static_cast<bool (T::*)() const>(&T::operator()))>
-  explicit Condition(const T *obj)
+  template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
+                            &T::operator()))>
+  explicit Condition(const T* obj)
       : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
 
   // A Condition that always returns `true`.
-  static const Condition kTrue;
+  // kTrue is only useful in a narrow set of circumstances, mostly when
+  // it's passed conditionally. For example:
+  //
+  //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
+  //
+  // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
+  // don't return immediately when the timeout happens; they still block until
+  // the Mutex becomes available. The return value of these methods does
+  // not indicate if the timeout was reached; rather it indicates whether or
+  // not the condition is true.
+  ABSL_CONST_INIT static const Condition kTrue;
 
   // Evaluates the condition.
   bool Eval() const;
@@ -716,25 +782,59 @@
   // Two `Condition` values are guaranteed equal if both their `func` and `arg`
   // components are the same. A null pointer is equivalent to a `true`
   // condition.
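+  //
+  // For example (illustrative; `PositiveArg` and `value` are not part of this
+  // header), two conditions built from the same function and argument are
+  // guaranteed equal:
+  //
+  //   bool PositiveArg(int* v);
+  //   int value = 0;
+  //   absl::Condition c1(PositiveArg, &value);
+  //   absl::Condition c2(PositiveArg, &value);
+  //   // Condition::GuaranteedEqual(&c1, &c2) is true.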
-  static bool GuaranteedEqual(const Condition *a, const Condition *b);
+  static bool GuaranteedEqual(const Condition* a, const Condition* b);
 
  private:
-  typedef bool (*InternalFunctionType)(void * arg);
-  typedef bool (Condition::*InternalMethodType)();
-  typedef bool (*InternalMethodCallerType)(void * arg,
-                                           InternalMethodType internal_method);
+  // Sizing an allocation for a method pointer can be subtle. In the Itanium
+  // specifications, a method pointer has a predictable, uniform size. On the
+  // other hand, in the MSVC ABI, method pointer sizes vary based on the
+  // inheritance of the class. Specifically, method pointers from classes with
+  // multiple inheritance are bigger than those of classes with single
+  // inheritance. Other variations also exist.
 
-  bool (*eval_)(const Condition*);  // Actual evaluator
-  InternalFunctionType function_;   // function taking pointer returning bool
-  InternalMethodType method_;       // method returning bool
-  void *arg_;                       // arg of function_ or object of method_
+#ifndef _MSC_VER
+  // Allocation for a function pointer or method pointer.
+  // The {0} initializer ensures that all unused bytes of this buffer are
+  // always zeroed out.  This is necessary, because GuaranteedEqual() compares
+  // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
+  using MethodPtr = bool (Condition::*)();
+  char callback_[sizeof(MethodPtr)] = {0};
+#else
+  // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
+  // may be the largest known pointer-to-member of any platform. For this
+  // reason we will allocate 24 bytes for MSVC platform toolchains.
+  char callback_[24] = {0};
+#endif
 
-  Condition();        // null constructor used only to create kTrue
+  // Function with which to evaluate callbacks and/or arguments.
+  bool (*eval_)(const Condition*) = nullptr;
+
+  // Either an argument for a function call or an object for a method call.
+  void* arg_ = nullptr;
 
   // Various functions eval_ can point to:
   static bool CallVoidPtrFunction(const Condition*);
-  template <typename T> static bool CastAndCallFunction(const Condition* c);
-  template <typename T> static bool CastAndCallMethod(const Condition* c);
+  template <typename T>
+  static bool CastAndCallFunction(const Condition* c);
+  template <typename T>
+  static bool CastAndCallMethod(const Condition* c);
+
+  // Helper methods for storing, validating, and reading callback arguments.
+  template <typename T>
+  inline void StoreCallback(T callback) {
+    static_assert(
+        sizeof(callback) <= sizeof(callback_),
+        "An overlarge pointer was passed as a callback to Condition.");
+    std::memcpy(callback_, &callback, sizeof(callback));
+  }
+
+  template <typename T>
+  inline void ReadCallback(T* callback) const {
+    std::memcpy(callback, callback_, sizeof(*callback));
+  }
+
+  // Used only to create kTrue.
+  constexpr Condition() = default;
 };
 
 // -----------------------------------------------------------------------------
@@ -762,9 +862,9 @@
 //
 // Usage to wake T is:
 //       mu.Lock();
-//      // process data, possibly establishing C
-//      if (C) { cv->Signal(); }
-//      mu.Unlock();
+//       // process data, possibly establishing C
+//       if (C) { cv->Signal(); }
+//       mu.Unlock();
 //
 // If C may be useful to more than one waiter, use `SignalAll()` instead of
 // `Signal()`.
@@ -786,7 +886,7 @@
   // spurious wakeup), then reacquires the `Mutex` and returns.
   //
   // Requires and ensures that the current thread holds the `Mutex`.
-  void Wait(Mutex *mu);
+  void Wait(Mutex* mu);
 
   // CondVar::WaitWithTimeout()
   //
@@ -801,7 +901,7 @@
   // to return `true` or `false`.
   //
   // Requires and ensures that the current thread holds the `Mutex`.
-  bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
+  bool WaitWithTimeout(Mutex* mu, absl::Duration timeout);
 
   // CondVar::WaitWithDeadline()
   //
@@ -818,7 +918,7 @@
   // to return `true` or `false`.
   //
   // Requires and ensures that the current thread holds the `Mutex`.
-  bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
+  bool WaitWithDeadline(Mutex* mu, absl::Time deadline);
 
   // CondVar::Signal()
   //
@@ -835,25 +935,17 @@
   // Causes all subsequent uses of this `CondVar` to be logged via
   // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
   // Note: this method substantially reduces `CondVar` performance.
-  void EnableDebugLog(const char *name);
+  void EnableDebugLog(const char* name);
 
  private:
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
-  synchronization_internal::CondVarImpl *impl() { return impl_.get(); }
-  synchronization_internal::SynchronizationStorage<
-      synchronization_internal::CondVarImpl>
-      impl_;
-#else
-  bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
-  void Remove(base_internal::PerThreadSynch *s);
-  void Wakeup(base_internal::PerThreadSynch *w);
+  bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
+  void Remove(base_internal::PerThreadSynch* s);
+  void Wakeup(base_internal::PerThreadSynch* w);
   std::atomic<intptr_t> cv_;  // Condition variable state.
-#endif
   CondVar(const CondVar&) = delete;
   CondVar& operator=(const CondVar&) = delete;
 };
 
-
 // Variants of MutexLock.
 //
 // If you find yourself using one of these, consider instead using
@@ -864,18 +956,29 @@
 // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
 class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
  public:
-  explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     if (this->mu_ != nullptr) {
       this->mu_->Lock();
     }
   }
+
+  explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(mu) {
+    if (this->mu_ != nullptr) {
+      this->mu_->LockWhen(cond);
+    }
+  }
+
   ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
-    if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+    if (this->mu_ != nullptr) {
+      this->mu_->Unlock();
+    }
   }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
   MutexLockMaybe(const MutexLockMaybe&) = delete;
   MutexLockMaybe(MutexLockMaybe&&) = delete;
   MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
@@ -888,30 +991,33 @@
 // mutex before destruction. `Release()` may be called at most once.
 class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
  public:
-  explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     this->mu_->Lock();
   }
+
+  explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(mu) {
+    this->mu_->LockWhen(cond);
+  }
+
   ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
-    if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+    if (this->mu_ != nullptr) {
+      this->mu_->Unlock();
+    }
   }
 
   void Release() ABSL_UNLOCK_FUNCTION();
 
  private:
-  Mutex *mu_;
+  Mutex* mu_;
   ReleasableMutexLock(const ReleasableMutexLock&) = delete;
   ReleasableMutexLock(ReleasableMutexLock&&) = delete;
   ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
   ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
 };
 
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
-
-inline constexpr Mutex::Mutex(absl::ConstInitType) : impl_(absl::kConstInit) {}
-
-#else
-
 inline Mutex::Mutex() : mu_(0) {
   ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
 }
@@ -920,61 +1026,70 @@
 
 inline CondVar::CondVar() : cv_(0) {}
 
-#endif  // ABSL_INTERNAL_USE_NONPROD_MUTEX
-
 // static
 template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
-  typedef bool (T::*MemberType)();
-  MemberType rm = reinterpret_cast<MemberType>(c->method_);
-  T *x = static_cast<T *>(c->arg_);
-  return (x->*rm)();
+bool Condition::CastAndCallMethod(const Condition* c) {
+  T* object = static_cast<T*>(c->arg_);
+  bool (T::*method_pointer)();
+  c->ReadCallback(&method_pointer);
+  return (object->*method_pointer)();
 }
 
 // static
 template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
-  typedef bool (*FuncType)(T *);
-  FuncType fn = reinterpret_cast<FuncType>(c->function_);
-  T *x = static_cast<T *>(c->arg_);
-  return (*fn)(x);
+bool Condition::CastAndCallFunction(const Condition* c) {
+  bool (*function)(T*);
+  c->ReadCallback(&function);
+  T* argument = static_cast<T*>(c->arg_);
+  return (*function)(argument);
 }
 
 template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
+inline Condition::Condition(bool (*func)(T*), T* arg)
     : eval_(&CastAndCallFunction<T>),
-      function_(reinterpret_cast<InternalFunctionType>(func)),
-      method_(nullptr),
-      arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
+      arg_(const_cast<void*>(static_cast<const void*>(arg))) {
+  static_assert(sizeof(&func) <= sizeof(callback_),
+                "An overlarge function pointer was passed to Condition.");
+  StoreCallback(func);
+}
+
+template <typename T, typename>
+inline Condition::Condition(bool (*func)(T*),
+                            typename absl::internal::identity<T>::type* arg)
+    // Just delegate to the overload above.
+    : Condition(func, arg) {}
 
 template <typename T>
-inline Condition::Condition(T *object,
+inline Condition::Condition(T* object,
                             bool (absl::internal::identity<T>::type::*method)())
-    : eval_(&CastAndCallMethod<T>),
-      function_(nullptr),
-      method_(reinterpret_cast<InternalMethodType>(method)),
-      arg_(object) {}
+    : eval_(&CastAndCallMethod<T>), arg_(object) {
+  static_assert(sizeof(&method) <= sizeof(callback_),
+                "An overlarge method pointer was passed to Condition.");
+  StoreCallback(method);
+}
 
 template <typename T>
-inline Condition::Condition(const T *object,
+inline Condition::Condition(const T* object,
                             bool (absl::internal::identity<T>::type::*method)()
                                 const)
     : eval_(&CastAndCallMethod<T>),
-      function_(nullptr),
-      method_(reinterpret_cast<InternalMethodType>(method)),
-      arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
+      arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
+  StoreCallback(method);
+}
 
-// Register a hook for profiling support.
+// Register hooks for profiling support.
 //
 // The function pointer registered here will be called whenever a mutex is
-// contended.  The callback is given the absl/base/cycleclock.h timestamp when
-// waiting began.
+// contended.  The callback is given the cycles for which waiting happened (as
+// measured by //absl/base/internal/cycleclock.h, and which may not
+// be real "cycle" counts.)
 //
-// Calls to this function do not race or block, but there is no ordering
-// guaranteed between calls to this function and call to the provided hook.
-// In particular, the previously registered hook may still be called for some
-// time after this function returns.
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
+// There is no ordering guarantee between when the hook is registered and when
+// callbacks will begin.  Only a single profiler can be installed in a running
+// binary; if this function is called a second time with a different function
+// pointer, the value is ignored (and will cause an assertion failure in debug
+// mode.)
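+//
+// A brief sketch of installing such a hook (the callback name and its body are
+// hypothetical):
+//
+//   void MyMutexProfiler(int64_t wait_cycles) {
+//     // Export `wait_cycles` to a metrics system of your choice.
+//   }
+//   ...
+//   absl::RegisterMutexProfiler(&MyMutexProfiler);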
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
 
 // Register a hook for Mutex tracing.
 //
@@ -986,12 +1101,10 @@
 //
 // The only event name currently sent is "slow release".
 //
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
-                              int64_t wait_cycles));
-
-// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
-// into a single interface, since they are only ever called in pairs.
+// This has the same ordering and single-use limitations as
+// RegisterMutexProfiler() above.
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
+                                    int64_t wait_cycles));
 
 // Register a hook for CondVar tracing.
 //
@@ -1003,24 +1116,9 @@
 // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
 // "SignalAll wakeup".
 //
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
-
-// Register a hook for symbolizing stack traces in deadlock detector reports.
-//
-// 'pc' is the program counter being symbolized, 'out' is the buffer to write
-// into, and 'out_size' is the size of the buffer.  This function can return
-// false if symbolizing failed, or true if a NUL-terminated symbol was written
-// to 'out.'
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-//
-// DEPRECATED: The default symbolizer function is absl::Symbolize() and the
-// ability to register a different hook for symbolizing stack traces will be
-// removed on or after 2023-05-01.
-ABSL_DEPRECATED("absl::RegisterSymbolizer() is deprecated and will be removed "
-                "on or after 2023-05-01")
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+// This has the same ordering and single-use limitations as
+// RegisterMutexProfiler() above.
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
 
 // EnableMutexInvariantDebugging()
 //
@@ -1037,7 +1135,7 @@
 enum class OnDeadlockCycle {
   kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
   kReport,  // Report lock cycles to stderr when detected
-  kAbort,  // Report lock cycles to stderr when detected, then abort
+  kAbort,   // Report lock cycles to stderr when detected, then abort
 };
 
 // SetMutexDeadlockDetectionMode()
@@ -1059,7 +1157,7 @@
 // By changing our extension points to be extern "C", we dodge this
 // check.
 extern "C" {
-void AbslInternalMutexYield();
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
 }  // extern "C"
 
 #endif  // ABSL_SYNCHRONIZATION_MUTEX_H_
diff --git a/abseil-cpp/absl/synchronization/mutex_benchmark.cc b/abseil-cpp/absl/synchronization/mutex_benchmark.cc
index 933ea14..b5d2fbc 100644
--- a/abseil-cpp/absl/synchronization/mutex_benchmark.cc
+++ b/abseil-cpp/absl/synchronization/mutex_benchmark.cc
@@ -61,8 +61,124 @@
   std::mutex* mu_;
 };
 
+// RAII object to change the Mutex priority of the running thread.
+class ScopedThreadMutexPriority {
+ public:
+  explicit ScopedThreadMutexPriority(int priority) {
+    absl::base_internal::ThreadIdentity* identity =
+        absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+    identity->per_thread_synch.priority = priority;
+    // Bump next_priority_read_cycles to the infinite future so that the
+    // implementation doesn't re-read the thread's actual scheduler priority
+    // and replace our temporary scoped priority.
+    identity->per_thread_synch.next_priority_read_cycles =
+        std::numeric_limits<int64_t>::max();
+  }
+  ~ScopedThreadMutexPriority() {
+    // Reset the "next priority read time" back to the infinite past so that
+    // the next time the Mutex implementation wants to know this thread's
+    // priority, it re-reads it from the OS instead of using our overridden
+    // priority.
+    absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
+        ->per_thread_synch.next_priority_read_cycles =
+        std::numeric_limits<int64_t>::min();
+  }
+};
+
+void BM_MutexEnqueue(benchmark::State& state) {
+  // In the "multiple priorities" variant of the benchmark, one of the
+  // threads runs with Mutex priority 0 while the rest run at elevated priority.
+  // This benchmarks the performance impact of the presence of a low priority
+  // waiter when a higher priority waiter adds itself to the queue
+  // (b/175224064).
+  //
+  // NOTE: The actual scheduler priority is not modified in this benchmark:
+  // all of the threads get CPU slices with the same priority. Only the
+  // Mutex queueing behavior is modified.
+  const bool multiple_priorities = state.range(0);
+  ScopedThreadMutexPriority priority_setter(
+      (multiple_priorities && state.thread_index() != 0) ? 1 : 0);
+
+  struct Shared {
+    absl::Mutex mu;
+    std::atomic<int> looping_threads{0};
+    std::atomic<int> blocked_threads{0};
+    std::atomic<bool> thread_has_mutex{false};
+  };
+  static Shared* shared = new Shared;
+
+  // Set up 'blocked_threads' to count how many threads are currently blocked
+  // in Abseil synchronization code.
+  //
+  // NOTE: Blocking done within the Google Benchmark library itself (e.g.
+  // the barrier which synchronizes threads entering and exiting the benchmark
+  // loop) does _not_ get registered in this counter. This is because Google
+  // Benchmark uses its own synchronization primitives based on std::mutex, not
+  // Abseil synchronization primitives. If at some point the benchmark library
+  // merges into Abseil, this code may break.
+  absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
+      &shared->blocked_threads);
+
+  // The benchmark framework may run several iterations in the same process,
+  // reusing the same static-initialized 'shared' object. Given the semantics
+  // of the members here, we expect everything to be reset to zero by the
+  // end of any iteration. Assert that's the case, just to be sure.
+  ABSL_RAW_CHECK(
+      shared->looping_threads.load(std::memory_order_relaxed) == 0 &&
+          shared->blocked_threads.load(std::memory_order_relaxed) == 0 &&
+          !shared->thread_has_mutex.load(std::memory_order_relaxed),
+      "Shared state isn't zeroed at start of benchmark iteration");
+
+  static constexpr int kBatchSize = 1000;
+  while (state.KeepRunningBatch(kBatchSize)) {
+    shared->looping_threads.fetch_add(1);
+    for (int i = 0; i < kBatchSize; i++) {
+      {
+        absl::MutexLock l(&shared->mu);
+        shared->thread_has_mutex.store(true, std::memory_order_relaxed);
+        // Spin until all other threads are either out of the benchmark loop
+        // or blocked on the mutex. This ensures that the mutex queue is kept
+        // at its maximal length to benchmark the performance of queueing on
+        // a highly contended mutex.
+        while (shared->looping_threads.load(std::memory_order_relaxed) -
+                   shared->blocked_threads.load(std::memory_order_relaxed) !=
+               1) {
+        }
+        shared->thread_has_mutex.store(false);
+      }
+      // Spin until some other thread has acquired the mutex before we block
+      // again. This ensures that we always go through the slow (queueing)
+      // acquisition path rather than reacquiring the mutex we just released.
+      while (!shared->thread_has_mutex.load(std::memory_order_relaxed) &&
+             shared->looping_threads.load(std::memory_order_relaxed) > 1) {
+      }
+    }
+    // The benchmark framework uses a barrier to ensure that all of the threads
+    // complete their benchmark loop together before any of the threads exit
+    // the loop. So, we need to remove ourselves from the "looping threads"
+    // counter here before potentially blocking on that barrier. Otherwise,
+    // another thread spinning above might wait forever for this thread to
+    // block on the mutex while we in fact are waiting to exit.
+    shared->looping_threads.fetch_add(-1);
+  }
+  absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
+      nullptr);
+}
+
+BENCHMARK(BM_MutexEnqueue)
+    ->Threads(4)
+    ->Threads(64)
+    ->Threads(128)
+    ->Threads(512)
+    ->ArgName("multiple_priorities")
+    ->Arg(false)
+    ->Arg(true);
+
 template <typename MutexType>
 void BM_Contended(benchmark::State& state) {
+  int priority = state.thread_index() % state.range(1);
+  ScopedThreadMutexPriority priority_setter(priority);
+
   struct Shared {
     MutexType mu;
     int data = 0;
@@ -80,86 +196,56 @@
     // To achieve this, the amount of local work is multiplied by the number
     // of threads to keep the ratio between local work and the critical
     // section approximately equal regardless of the number of threads.
-    DelayNs(100 * state.threads, &local);
+    DelayNs(100 * state.threads(), &local);
     RaiiLocker<MutexType> locker(&shared->mu);
     DelayNs(state.range(0), &shared->data);
   }
 }
+void SetupBenchmarkArgs(benchmark::internal::Benchmark* bm,
+                        bool do_test_priorities) {
+  const int max_num_priorities = do_test_priorities ? 2 : 1;
+  bm->UseRealTime()
+      // ThreadPerCpu poorly handles non-power-of-two CPU counts.
+      ->Threads(1)
+      ->Threads(2)
+      ->Threads(4)
+      ->Threads(6)
+      ->Threads(8)
+      ->Threads(12)
+      ->Threads(16)
+      ->Threads(24)
+      ->Threads(32)
+      ->Threads(48)
+      ->Threads(64)
+      ->Threads(96)
+      ->Threads(128)
+      ->Threads(192)
+      ->Threads(256)
+      ->ArgNames({"cs_ns", "num_prios"});
+  // Some empirically chosen amounts of work in the critical section.
+  // 1 is low contention and 2000 is high contention, with a few in between.
+  for (int critical_section_ns : {1, 20, 50, 200, 2000}) {
+    for (int num_priorities = 1; num_priorities <= max_num_priorities;
+         num_priorities++) {
+      bm->ArgPair(critical_section_ns, num_priorities);
+    }
+  }
+}
 
 BENCHMARK_TEMPLATE(BM_Contended, absl::Mutex)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in critical section.
-    // 1 is low contention, 200 is high contention and few values in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
+    ->Apply([](benchmark::internal::Benchmark* bm) {
+      SetupBenchmarkArgs(bm, /*do_test_priorities=*/true);
+    });
 
 BENCHMARK_TEMPLATE(BM_Contended, absl::base_internal::SpinLock)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in critical section.
-    // 1 is low contention, 200 is high contention and few values in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
+    ->Apply([](benchmark::internal::Benchmark* bm) {
+      SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
+    });
 
 BENCHMARK_TEMPLATE(BM_Contended, std::mutex)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in critical section.
-    // 1 is low contention, 200 is high contention and few values in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
+    ->Apply([](benchmark::internal::Benchmark* bm) {
+      SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
+    });
 
 // Measure the overhead of conditions on mutex release (when they must be
 // evaluated).  Mutex has (some) support for equivalence classes allowing
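
The registration refactor above funnels the shared thread counts and the (cs_ns, num_prios) grid through one SetupBenchmarkArgs() helper passed to Benchmark::Apply(). A compact sketch of the same Google Benchmark pattern, using a hypothetical BM_Example rather than the mutex benchmarks:

    #include "benchmark/benchmark.h"

    static void BM_Example(benchmark::State& state) {
      for (auto _ : state) {
        benchmark::DoNotOptimize(state.range(0) * state.range(1));
      }
    }

    // One helper defines the argument grid for every benchmark that Apply()s it,
    // instead of repeating long Threads()/Arg() chains per registration.
    static void ExampleArgs(benchmark::internal::Benchmark* bm) {
      bm->UseRealTime()->Threads(1)->Threads(4)->ArgNames({"work", "variant"});
      for (int work : {1, 20, 2000}) {
        for (int variant = 1; variant <= 2; ++variant) {
          bm->ArgPair(work, variant);
        }
      }
    }

    BENCHMARK(BM_Example)->Apply(ExampleArgs);
    BENCHMARK_MAIN();
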
diff --git a/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc b/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc
new file mode 100644
index 0000000..f4c82d2
--- /dev/null
+++ b/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc
@@ -0,0 +1,138 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/mutex.h"
+
+#include <cstdlib>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace {
+
+class IncompleteClass;
+
+#ifdef _MSC_VER
+// These tests verify expectations about sizes of MSVC pointers to methods.
+// Pointers to methods are distinguished by whether their class hierarchies
+// contain single inheritance, multiple inheritance, or virtual inheritance.
+
+// Declare classes of the various MSVC inheritance types.
+class __single_inheritance SingleInheritance{};
+class __multiple_inheritance MultipleInheritance;
+class __virtual_inheritance VirtualInheritance;
+
+TEST(MutexMethodPointerTest, MicrosoftMethodPointerSize) {
+  void (SingleInheritance::*single_inheritance_method_pointer)();
+  void (MultipleInheritance::*multiple_inheritance_method_pointer)();
+  void (VirtualInheritance::*virtual_inheritance_method_pointer)();
+
+#if defined(_M_IX86) || defined(_M_ARM)
+  static_assert(sizeof(single_inheritance_method_pointer) == 4,
+                "Unexpected sizeof(single_inheritance_method_pointer).");
+  static_assert(sizeof(multiple_inheritance_method_pointer) == 8,
+                "Unexpected sizeof(multiple_inheritance_method_pointer).");
+  static_assert(sizeof(virtual_inheritance_method_pointer) == 12,
+                "Unexpected sizeof(virtual_inheritance_method_pointer).");
+#elif defined(_M_X64) || defined(__aarch64__)
+  static_assert(sizeof(single_inheritance_method_pointer) == 8,
+                "Unexpected sizeof(single_inheritance_method_pointer).");
+  static_assert(sizeof(multiple_inheritance_method_pointer) == 16,
+                "Unexpected sizeof(multiple_inheritance_method_pointer).");
+  static_assert(sizeof(virtual_inheritance_method_pointer) == 16,
+                "Unexpected sizeof(virtual_inheritance_method_pointer).");
+#endif
+  void (IncompleteClass::*incomplete_class_method_pointer)();
+  static_assert(sizeof(incomplete_class_method_pointer) >=
+                    sizeof(virtual_inheritance_method_pointer),
+                "Failed invariant: sizeof(incomplete_class_method_pointer) >= "
+                "sizeof(virtual_inheritance_method_pointer)!");
+}
+
+class Callback {
+  bool x = true;
+
+ public:
+  Callback() {}
+  bool method() {
+    x = !x;
+    return x;
+  }
+};
+
+class M2 {
+  bool x = true;
+
+ public:
+  M2() {}
+  bool method2() {
+    x = !x;
+    return x;
+  }
+};
+
+class MultipleInheritance : public Callback, public M2 {};
+
+TEST(MutexMethodPointerTest, ConditionWithMultipleInheritanceMethod) {
+  // This test ensures that Condition can deal with method pointers from classes
+  // with multiple inheritance.
+  MultipleInheritance object = MultipleInheritance();
+  absl::Condition condition(&object, &MultipleInheritance::method);
+  EXPECT_FALSE(condition.Eval());
+  EXPECT_TRUE(condition.Eval());
+}
+
+class __virtual_inheritance VirtualInheritance : virtual public Callback {
+  bool x = false;
+
+ public:
+  VirtualInheritance() {}
+  bool method() {
+    x = !x;
+    return x;
+  }
+};
+
+TEST(MutexMethodPointerTest, ConditionWithVirtualInheritanceMethod) {
+  // This test ensures that Condition can deal with method pointers from classes
+  // with virtual inheritance.
+  VirtualInheritance object = VirtualInheritance();
+  absl::Condition condition(&object, &VirtualInheritance::method);
+  EXPECT_TRUE(condition.Eval());
+  EXPECT_FALSE(condition.Eval());
+}
+#endif  // #ifdef _MSC_VER
+
+TEST(MutexMethodPointerTest, ConditionWithIncompleteClassMethod) {
+  using IncompleteClassMethodPointer = void (IncompleteClass::*)();
+
+  union CallbackSlot {
+    void (*anonymous_function_pointer)();
+    IncompleteClassMethodPointer incomplete_class_method_pointer;
+  };
+
+  static_assert(sizeof(CallbackSlot) >= sizeof(IncompleteClassMethodPointer),
+                "The callback slot is not big enough for method pointers.");
+  static_assert(
+      sizeof(CallbackSlot) == sizeof(IncompleteClassMethodPointer),
+      "The callback slot is not big enough for anonymous function pointers.");
+
+#if defined(_MSC_VER)
+  static_assert(sizeof(IncompleteClassMethodPointer) <= 24,
+                "The pointer to a method of an incomplete class is too big.");
+#endif
+}
+
+}  // namespace
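
The new test above checks that absl::Condition can hold bound method pointers even under MSVC's larger multiple/virtual-inheritance representations and for incomplete classes. For context, a short sketch of the ordinary use of that (object, method-pointer) constructor together with Mutex::Await(); the Queue class is illustrative only:

    #include <deque>

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Queue {
     public:
      void Push(int v) {
        absl::MutexLock lock(&mu_);
        items_.push_back(v);
      }

      int BlockingPop() {
        absl::MutexLock lock(&mu_);
        // Condition bound to a const member function of this object; this is
        // the constructor form the test above exercises.
        mu_.Await(absl::Condition(this, &Queue::HasItems));
        int v = items_.front();
        items_.pop_front();
        return v;
      }

     private:
      bool HasItems() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
        return !items_.empty();
      }

      absl::Mutex mu_;
      std::deque<int> items_ ABSL_GUARDED_BY(mu_);
    };
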
diff --git a/abseil-cpp/absl/synchronization/mutex_test.cc b/abseil-cpp/absl/synchronization/mutex_test.cc
index 16fc905..b585c34 100644
--- a/abseil-cpp/absl/synchronization/mutex_test.cc
+++ b/abseil-cpp/absl/synchronization/mutex_test.cc
@@ -26,13 +26,15 @@
 #include <random>
 #include <string>
 #include <thread>  // NOLINT(build/c++11)
+#include <type_traits>
 #include <vector>
 
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/sysinfo.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
 #include "absl/memory/memory.h"
 #include "absl/synchronization/internal/thread_pool.h"
 #include "absl/time/clock.h"
@@ -86,7 +88,7 @@
 
 static void CheckSumG0G1(void *v) {
   TestContext *cxt = static_cast<TestContext *>(v);
-  ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in CheckSumG0G1");
+  CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
   SetInvariantChecked(true);
 }
 
@@ -131,7 +133,7 @@
   } else {
     for (int i = 0; i != cxt->iterations; i++) {
       absl::ReaderMutexLock l(&cxt->mu);
-      ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in TestRW");
+      CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
       cxt->mu.AssertReaderHeld();
     }
   }
@@ -156,7 +158,7 @@
   cxt->mu.AssertHeld();
   while (cxt->g0 < cxt->iterations) {
     cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
-    ABSL_RAW_CHECK(mc.MyTurn(), "Error in TestAwait");
+    CHECK(mc.MyTurn()) << "Error in TestAwait";
     cxt->mu.AssertHeld();
     if (cxt->g0 < cxt->iterations) {
       int a = cxt->g0 + 1;
@@ -184,7 +186,7 @@
 }
 
 static void TestSignal(TestContext *cxt, int c) {
-  ABSL_RAW_CHECK(cxt->threads == 2, "TestSignal should use 2 threads");
+  CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
   int target = c;
   absl::MutexLock l(&cxt->mu);
   cxt->mu.AssertHeld();
@@ -221,8 +223,8 @@
 static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
 
 static void TestTime(TestContext *cxt, int c, bool use_cv) {
-  ABSL_RAW_CHECK(cxt->iterations == 1, "TestTime should only use 1 iteration");
-  ABSL_RAW_CHECK(cxt->threads > 2, "TestTime should use more than 2 threads");
+  CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
+  CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
   const bool kFalse = false;
   absl::Condition false_cond(&kFalse);
   absl::Condition g0ge2(G0GE2, cxt);
@@ -233,26 +235,24 @@
     if (use_cv) {
       cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
     } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
+      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+          << "TestTime failed";
     }
     absl::Duration elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
-        "TestTime failed");
-    ABSL_RAW_CHECK(cxt->g0 == 1, "TestTime failed");
+    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+        << "TestTime failed";
+    CHECK_EQ(cxt->g0, 1) << "TestTime failed";
 
     start = absl::Now();
     if (use_cv) {
       cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
     } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
+      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+          << "TestTime failed";
     }
     elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
-        "TestTime failed");
+    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+        << "TestTime failed";
     cxt->g0++;
     if (use_cv) {
       cxt->cv.Signal();
@@ -262,26 +262,24 @@
     if (use_cv) {
       cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
     } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)),
-                     "TestTime failed");
+      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
+          << "TestTime failed";
     }
     elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0),
-        "TestTime failed");
-    ABSL_RAW_CHECK(cxt->g0 >= 3, "TestTime failed");
+    CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
+        << "TestTime failed";
+    CHECK_GE(cxt->g0, 3) << "TestTime failed";
 
     start = absl::Now();
     if (use_cv) {
       cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
     } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
+      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+          << "TestTime failed";
     }
     elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
-        "TestTime failed");
+    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+        << "TestTime failed";
     if (use_cv) {
       cxt->cv.SignalAll();
     }
@@ -290,13 +288,13 @@
     if (use_cv) {
       cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
     } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
+      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+          << "TestTime failed";
     }
     elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(absl::Seconds(0.9) <= elapsed &&
-                   elapsed <= absl::Seconds(2.0), "TestTime failed");
-    ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed");
+    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+        << "TestTime failed";
+    CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";
 
   } else if (c == 1) {
     absl::MutexLock l(&cxt->mu);
@@ -304,14 +302,12 @@
     if (use_cv) {
       cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
     } else {
-      ABSL_RAW_CHECK(
-          !cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)),
-          "TestTime failed");
+      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
+          << "TestTime failed";
     }
     const absl::Duration elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9),
-        "TestTime failed");
+    CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
+        << "TestTime failed";
     cxt->g0++;
   } else if (c == 2) {
     absl::MutexLock l(&cxt->mu);
@@ -320,8 +316,8 @@
         cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
       }
     } else {
-      ABSL_RAW_CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)),
-                     "TestTime failed");
+      CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
+          << "TestTime failed";
     }
     cxt->g0++;
   } else {
@@ -342,7 +338,7 @@
 static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
 
 static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
-                    const std::function<void(int)>& cb) {
+                    const std::function<void(int)> &cb) {
   mu->Lock();
   int c = (*c0)++;
   mu->Unlock();
@@ -365,9 +361,9 @@
   cxt->threads = threads;
   absl::synchronization_internal::ThreadPool tp(threads);
   for (int i = 0; i != threads; i++) {
-    tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
-                          std::function<void(int)>(
-                              std::bind(test, cxt, std::placeholders::_1))));
+    tp.Schedule(std::bind(
+        &EndTest, &c0, &c1, &mu2, &cv2,
+        std::function<void(int)>(std::bind(test, cxt, std::placeholders::_1))));
   }
   mu2.Lock();
   while (c1 != threads) {
@@ -398,7 +394,7 @@
   TestContext cxt;
   cxt.mu.EnableInvariantDebugging(invariant, &cxt);
   int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
-  ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
+  CHECK(GetInvariantChecked()) << "Invariant not checked";
   absl::EnableMutexInvariantDebugging(false);  // Restore.
   return ret;
 }
@@ -681,14 +677,14 @@
   bool waiting = false;
 };
 
-static bool LockWhenTestIsCond(LockWhenTestStruct* s) {
+static bool LockWhenTestIsCond(LockWhenTestStruct *s) {
   s->mu2.Lock();
   s->waiting = true;
   s->mu2.Unlock();
   return s->cond;
 }
 
-static void LockWhenTestWaitForIsCond(LockWhenTestStruct* s) {
+static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) {
   s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
   s->mu1.Unlock();
 }
@@ -707,6 +703,40 @@
   t.join();
 }
 
+TEST(Mutex, LockWhenGuard) {
+  absl::Mutex mu;
+  int n = 30;
+  bool done = false;
+
+  // We don't inline the lambda because the conversion is ambiguous in MSVC.
+  bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
+  bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };
+
+  std::thread t1([&mu, &n, &done, cond_eq_10]() {
+    absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
+    done = true;
+  });
+
+  std::thread t2[10];
+  for (std::thread &t : t2) {
+    t = std::thread([&mu, &n, cond_lt_10]() {
+      absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
+      ++n;
+    });
+  }
+
+  {
+    absl::MutexLock lock(&mu);
+    n = 0;
+  }
+
+  for (std::thread &t : t2) t.join();
+  t1.join();
+
+  EXPECT_TRUE(done);
+  EXPECT_EQ(n, 10);
+}
+
 // --------------------------------------------------------
 // The following test requires Mutex::ReaderLock to be a real shared
 // lock, which is not the case in all builds.
@@ -818,7 +848,7 @@
 // held and then destroyed (w/o unlocking).
 #ifdef ABSL_HAVE_THREAD_SANITIZER
 // TSAN reports errors when locked Mutexes are destroyed.
-TEST(Mutex, DISABLED_LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
 #else
 TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
 #endif
@@ -836,31 +866,109 @@
   }
 }
 
-// --------------------------------------------------------
-// Test for bug with pattern of readers using a condvar.  The bug was that if a
-// reader went to sleep on a condition variable while one or more other readers
-// held the lock, but there were no waiters, the reader count (held in the
-// mutex word) would be lost.  (This is because Enqueue() had at one time
-// always placed the thread on the Mutex queue.  Later (CL 4075610), to
-// tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
-// changed so that it could also place a thread on a condition-variable.  This
-// introduced the case where Enqueue() returned with an empty queue, and this
-// case was handled incorrectly in one place.)
+// Some functions taking pointers to non-const.
+bool Equals42(int *p) { return *p == 42; }
+bool Equals43(int *p) { return *p == 43; }
 
-static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
-                                     int *running) {
-  std::random_device dev;
-  std::mt19937 gen(dev());
-  std::uniform_int_distribution<int> random_millis(0, 15);
-  mu->ReaderLock();
-  while (*running == 3) {
-    absl::SleepFor(absl::Milliseconds(random_millis(gen)));
-    cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
-  }
-  mu->ReaderUnlock();
-  mu->Lock();
-  (*running)--;
-  mu->Unlock();
+// Some functions taking pointers to const.
+bool ConstEquals42(const int *p) { return *p == 42; }
+bool ConstEquals43(const int *p) { return *p == 43; }
+
+// Some function templates taking pointers. Note it's possible for `T` to be
+// deduced as non-const or const, which creates the potential for ambiguity,
+// but which the implementation is careful to avoid.
+template <typename T>
+bool TemplateEquals42(T *p) {
+  return *p == 42;
+}
+template <typename T>
+bool TemplateEquals43(T *p) {
+  return *p == 43;
+}
+
+TEST(Mutex, FunctionPointerCondition) {
+  // Some arguments.
+  int x = 42;
+  const int const_x = 42;
+
+  // Parameter non-const, argument non-const.
+  EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
+  EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());
+
+  // Parameter const, argument non-const.
+  EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
+  EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());
+
+  // Parameter const, argument const.
+  EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
+  EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());
+
+  // Parameter type deduced, argument non-const.
+  EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
+  EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());
+
+  // Parameter type deduced, argument const.
+  EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
+  EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());
+
+  // Parameter non-const, argument const is not well-formed.
+  EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
+                                      decltype(&const_x)>::value));
+  // Validate use of is_constructible by contrasting to a well-formed case.
+  EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
+                                     decltype(&const_x)>::value));
+}
+
+// Example base and derived class for use in predicates and test below. Not a
+// particularly realistic example, but it suffices for testing purposes.
+struct Base {
+  explicit Base(int v) : value(v) {}
+  int value;
+};
+struct Derived : Base {
+  explicit Derived(int v) : Base(v) {}
+};
+
+// Some functions taking pointer to non-const `Base`.
+bool BaseEquals42(Base *p) { return p->value == 42; }
+bool BaseEquals43(Base *p) { return p->value == 43; }
+
+// Some functions taking pointer to const `Base`.
+bool ConstBaseEquals42(const Base *p) { return p->value == 42; }
+bool ConstBaseEquals43(const Base *p) { return p->value == 43; }
+
+TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
+  // Some arguments.
+  Derived derived(42);
+  const Derived const_derived(42);
+
+  // Parameter non-const base, argument derived non-const.
+  EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
+  EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());
+
+  // Parameter const base, argument derived non-const.
+  EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
+  EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());
+
+  // Parameter const base, argument derived const.
+  EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
+  EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
+
+  // Parameter const base, argument derived const.
+  EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
+  EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
+
+  // Parameter derived, argument base is not well-formed.
+  bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
+  EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
+                                      Base *>::value));
+  EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
+                                      const Base *>::value));
+  // Validate use of is_constructible by contrasting to well-formed cases.
+  EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
+                                     Derived *>::value));
+  EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
+                                     const Derived *>::value));
 }
 
 struct True {
@@ -911,6 +1019,33 @@
   }
 }
 
+// --------------------------------------------------------
+// Test for bug with pattern of readers using a condvar.  The bug was that if a
+// reader went to sleep on a condition variable while one or more other readers
+// held the lock, but there were no waiters, the reader count (held in the
+// mutex word) would be lost.  (This is because Enqueue() had at one time
+// always placed the thread on the Mutex queue.  Later (CL 4075610), to
+// tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
+// changed so that it could also place a thread on a condition-variable.  This
+// introduced the case where Enqueue() returned with an empty queue, and this
+// case was handled incorrectly in one place.)
+
+static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
+                                     int *running) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::uniform_int_distribution<int> random_millis(0, 15);
+  mu->ReaderLock();
+  while (*running == 3) {
+    absl::SleepFor(absl::Milliseconds(random_millis(gen)));
+    cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
+  }
+  mu->ReaderUnlock();
+  mu->Lock();
+  (*running)--;
+  mu->Unlock();
+}
+
 static bool IntIsZero(int *x) { return *x == 0; }
 
 // Test for reader waiting condition variable when there are other readers
@@ -952,7 +1087,7 @@
                                absl::Milliseconds(100));
     x->mu1.Unlock();
   }
-  ABSL_RAW_CHECK(x->value < 4, "should not be invoked a fourth time");
+  CHECK_LT(x->value, 4) << "should not be invoked a fourth time";
 
   // We arrange for the condition to return true on only the 2nd and 3rd calls.
   return x->value == 2 || x->value == 3;
@@ -1002,9 +1137,6 @@
   x.mu0.Unlock();
 }
 
-// The deadlock detector is not part of non-prod builds, so do not test it.
-#if !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
-
 TEST(Mutex, DeadlockDetector) {
   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
 
@@ -1098,6 +1230,25 @@
   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
 }
 
+TEST(Mutex, DeadlockDetectorLongCycle) {
+  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
+
+  // This test generates a warning if it passes, and crashes otherwise.
+  // Cause bazel to ignore the warning.
+  ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
+
+  // Check that we survive a deadlock with a lock cycle.
+  std::vector<absl::Mutex> mutex(100);
+  for (size_t i = 0; i != mutex.size(); i++) {
+    mutex[i].Lock();
+    mutex[(i + 1) % mutex.size()].Lock();
+    mutex[i].Unlock();
+    mutex[(i + 1) % mutex.size()].Unlock();
+  }
+
+  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+}
+
 // This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
 // annotation-based static thread-safety analysis is not currently
 // predicate-aware and cannot tell if the two for-loops that acquire and
@@ -1122,7 +1273,7 @@
 
 #ifdef ABSL_HAVE_THREAD_SANITIZER
 // TSAN reports errors when locked Mutexes are destroyed.
-TEST(Mutex, DISABLED_DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
 #else
 TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
 #endif
@@ -1158,7 +1309,6 @@
   c.Lock();
   c.Unlock();
 }
-#endif  // !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
 
 // --------------------------------------------------------
 // Test for timeouts/deadlines on condition waits that are specified using
@@ -1184,11 +1334,9 @@
   // different clock than absl::Now(), but these cases should be handled by the
   // retry mechanism in each TimeoutTest.
   if (actual_delay < expected_delay) {
-    ABSL_RAW_LOG(WARNING,
-                 "Actual delay %s was too short, expected %s (difference %s)",
-                 absl::FormatDuration(actual_delay).c_str(),
-                 absl::FormatDuration(expected_delay).c_str(),
-                 absl::FormatDuration(actual_delay - expected_delay).c_str());
+    LOG(WARNING) << "Actual delay " << actual_delay
+                 << " was too short, expected " << expected_delay
+                 << " (difference " << actual_delay - expected_delay << ")";
     pass = false;
   }
   // If the expected delay is <= zero then allow a small error tolerance, since
@@ -1199,11 +1347,9 @@
                                  ? absl::Milliseconds(10)
                                  : TimeoutTestAllowedSchedulingDelay();
   if (actual_delay > expected_delay + tolerance) {
-    ABSL_RAW_LOG(WARNING,
-                 "Actual delay %s was too long, expected %s (difference %s)",
-                 absl::FormatDuration(actual_delay).c_str(),
-                 absl::FormatDuration(expected_delay).c_str(),
-                 absl::FormatDuration(actual_delay - expected_delay).c_str());
+    LOG(WARNING) << "Actual delay " << actual_delay
+                 << " was too long, expected " << expected_delay
+                 << " (difference " << actual_delay - expected_delay << ")";
     pass = false;
   }
   return pass;
@@ -1253,12 +1399,6 @@
             << " expected_delay: " << param.expected_delay;
 }
 
-std::string FormatString(const TimeoutTestParam &param) {
-  std::ostringstream os;
-  os << param;
-  return os.str();
-}
-
 // Like `thread::Executor::ScheduleAt` except:
 // a) Delays zero or negative are executed immediately in the current thread.
 // b) Infinite delays are never scheduled.
@@ -1388,13 +1528,13 @@
 
 TEST_P(TimeoutTest, Await) {
   const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+  LOG(INFO) << "Params: " << params;
 
   // Because this test asserts bounds on scheduling delays it is flaky.  To
   // compensate it loops forever until it passes.  Failures express as test
   // timeouts, in which case the test log can be used to diagnose the issue.
   for (int attempt = 1;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+    LOG(INFO) << "Attempt " << attempt;
 
     absl::Mutex mu;
     bool value = false;  // condition value (under mu)
@@ -1422,13 +1562,13 @@
 
 TEST_P(TimeoutTest, LockWhen) {
   const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+  LOG(INFO) << "Params: " << params;
 
   // Because this test asserts bounds on scheduling delays it is flaky.  To
   // compensate it loops forever until it passes.  Failures express as test
   // timeouts, in which case the test log can be used to diagnose the issue.
   for (int attempt = 1;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+    LOG(INFO) << "Attempt " << attempt;
 
     absl::Mutex mu;
     bool value = false;  // condition value (under mu)
@@ -1457,13 +1597,13 @@
 
 TEST_P(TimeoutTest, ReaderLockWhen) {
   const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+  LOG(INFO) << "Params: " << params;
 
   // Because this test asserts bounds on scheduling delays it is flaky.  To
   // compensate it loops forever until it passes.  Failures express as test
   // timeouts, in which case the test log can be used to diagnose the issue.
   for (int attempt = 0;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+    LOG(INFO) << "Attempt " << attempt;
 
     absl::Mutex mu;
     bool value = false;  // condition value (under mu)
@@ -1493,13 +1633,13 @@
 
 TEST_P(TimeoutTest, Wait) {
   const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+  LOG(INFO) << "Params: " << params;
 
   // Because this test asserts bounds on scheduling delays it is flaky.  To
   // compensate it loops forever until it passes.  Failures express as test
   // timeouts, in which case the test log can be used to diagnose the issue.
   for (int attempt = 0;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+    LOG(INFO) << "Attempt " << attempt;
 
     absl::Mutex mu;
     bool value = false;  // condition value (under mu)
@@ -1663,8 +1803,7 @@
 TEST(Mutex, CVTime) {
   int threads = 10;  // Use a fixed thread count of 10
   int iterations = 1;
-  EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1),
-            threads * iterations);
+  EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations);
 }
 
 TEST(Mutex, MuTime) {
@@ -1673,4 +1812,85 @@
   EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
 }
 
+TEST(Mutex, SignalExitedThread) {
+  // The test may expose a race when Mutex::Unlock signals a thread
+  // that has already exited.
+#if defined(__wasm__) || defined(__asmjs__)
+  constexpr int kThreads = 1;  // OOMs under WASM
+#else
+  constexpr int kThreads = 100;
+#endif
+  std::vector<std::thread> top;
+  for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) {
+    top.emplace_back([&]() {
+      for (int i = 0; i < kThreads; i++) {
+        absl::Mutex mu;
+        std::thread t([&]() {
+          mu.Lock();
+          mu.Unlock();
+        });
+        mu.Lock();
+        mu.Unlock();
+        t.join();
+      }
+    });
+  }
+  for (auto &th : top) th.join();
+}
+
+TEST(Mutex, WriterPriority) {
+  absl::Mutex mu;
+  bool wrote = false;
+  std::atomic<bool> saw_wrote{false};
+  auto readfunc = [&]() {
+    for (size_t i = 0; i < 10; ++i) {
+      absl::ReaderMutexLock lock(&mu);
+      if (wrote) {
+        saw_wrote = true;
+        break;
+      }
+      absl::SleepFor(absl::Seconds(1));
+    }
+  };
+  std::thread t1(readfunc);
+  absl::SleepFor(absl::Milliseconds(500));
+  std::thread t2(readfunc);
+  // Note: this test guards against a bug that was related to an uninit
+  // PerThreadSynch::priority, so the writer intentionally runs on a new thread.
+  std::thread t3([&]() {
+    // The writer should be able to squeeze between the two alternating readers.
+    absl::MutexLock lock(&mu);
+    wrote = true;
+  });
+  t1.join();
+  t2.join();
+  t3.join();
+  EXPECT_TRUE(saw_wrote.load());
+}
+
+TEST(Mutex, LockWhenWithTimeoutResult) {
+  // Check various corner cases for Await/LockWhen return value
+  // with always true/always false conditions.
+  absl::Mutex mu;
+  const bool kAlwaysTrue = true, kAlwaysFalse = false;
+  const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
+  EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+  mu.Unlock();
+  EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+  EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
+  EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
+  std::thread th1([&]() {
+    EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+    mu.Unlock();
+  });
+  std::thread th2([&]() {
+    EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+    mu.Unlock();
+  });
+  absl::SleepFor(absl::Milliseconds(100));
+  mu.Unlock();
+  th1.join();
+  th2.join();
+}
+
 }  // namespace
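
The LockWhenWithTimeoutResult test above pins down the contract the new assertions rely on: LockWhenWithTimeout() and AwaitWithTimeout() return whether the condition was true when the call returned, not whether the timeout fired, and the lock is held on return from LockWhenWithTimeout() either way. A small sketch of calling code written against that contract; WaitForReady is an illustrative helper, not part of the library:

    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    // Returns true if *ready became true within `timeout`. The mutex is held on
    // return from LockWhenWithTimeout() regardless of the result, so it must be
    // unlocked on both paths.
    bool WaitForReady(absl::Mutex* mu, const bool* ready, absl::Duration timeout) {
      const absl::Condition cond(ready);
      const bool became_ready = mu->LockWhenWithTimeout(cond, timeout);
      mu->Unlock();
      return became_ready;
    }
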
diff --git a/abseil-cpp/absl/synchronization/notification.cc b/abseil-cpp/absl/synchronization/notification.cc
index e91b903..165ba66 100644
--- a/abseil-cpp/absl/synchronization/notification.cc
+++ b/abseil-cpp/absl/synchronization/notification.cc
@@ -16,7 +16,6 @@
 
 #include <atomic>
 
-#include "absl/base/attributes.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/time/time.h"
diff --git a/abseil-cpp/absl/synchronization/notification.h b/abseil-cpp/absl/synchronization/notification.h
index 9a354ca..8986d9a 100644
--- a/abseil-cpp/absl/synchronization/notification.h
+++ b/abseil-cpp/absl/synchronization/notification.h
@@ -22,7 +22,7 @@
 // The `Notification` object maintains a private boolean "notified" state that
 // transitions to `true` at most once. The `Notification` class provides the
 // following primary member functions:
-//   * `HasBeenNotified() `to query its state
+//   * `HasBeenNotified()` to query its state
 //   * `WaitForNotification*()` to have threads wait until the "notified" state
 //      is `true`.
 //   * `Notify()` to set the notification's "notified" state to `true` and
@@ -52,7 +52,7 @@
 
 #include <atomic>
 
-#include "absl/base/macros.h"
+#include "absl/base/attributes.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/time/time.h"
 
@@ -74,7 +74,7 @@
   // Notification::HasBeenNotified()
   //
   // Returns the value of the notification's internal "notified" state.
-  bool HasBeenNotified() const {
+  ABSL_MUST_USE_RESULT bool HasBeenNotified() const {
     return HasBeenNotifiedInternal(&this->notified_yet_);
   }
 
diff --git a/abseil-cpp/absl/synchronization/notification_test.cc b/abseil-cpp/absl/synchronization/notification_test.cc
index 100ea76..49ce61a 100644
--- a/abseil-cpp/absl/synchronization/notification_test.cc
+++ b/abseil-cpp/absl/synchronization/notification_test.cc
@@ -79,7 +79,7 @@
 
   // Allow for a slight early return, to account for quality of implementation
   // issues on various platforms.
-  const absl::Duration slop = absl::Microseconds(200);
+  const absl::Duration slop = absl::Milliseconds(5);
   EXPECT_LE(delay - slop, elapsed)
       << "WaitForNotificationWithTimeout returned " << delay - elapsed
       << " early (with " << slop << " slop), start time was " << start;
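
Together, the notification.h and notification_test.cc changes above mark HasBeenNotified() as must-use and widen the allowed early-return slop to 5ms. For reference, a minimal sketch of the one-shot Notification pattern these APIs implement:

    #include <thread>

    #include "absl/synchronization/notification.h"
    #include "absl/time/time.h"

    int main() {
      absl::Notification ready;

      std::thread worker([&ready] {
        // ... perform one-time setup ...
        ready.Notify();  // flips the "notified" state to true, at most once
      });

      // HasBeenNotified() is now ABSL_MUST_USE_RESULT, so its value must be used.
      if (!ready.HasBeenNotified() &&
          !ready.WaitForNotificationWithTimeout(absl::Seconds(5))) {
        // Timed out waiting for setup; handle as appropriate.
      }
      worker.join();
      return 0;
    }
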
diff --git a/abseil-cpp/absl/time/BUILD.bazel b/abseil-cpp/absl/time/BUILD.bazel
index 991241a..88d2088 100644
--- a/abseil-cpp/absl/time/BUILD.bazel
+++ b/abseil-cpp/absl/time/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -46,34 +45,29 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/numeric:int128",
         "//absl/strings",
         "//absl/time/internal/cctz:civil_time",
         "//absl/time/internal/cctz:time_zone",
+        "//absl/types:optional",
     ],
 )
 
 cc_library(
     name = "test_util",
     testonly = 1,
-    srcs = [
-        "internal/test_util.cc",
-        "internal/zoneinfo.inc",
-    ],
+    srcs = ["internal/test_util.cc"],
     hdrs = ["internal/test_util.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl/time:__pkg__",
-    ],
+    visibility = ["//visibility:private"],
     deps = [
         ":time",
         "//absl/base:config",
         "//absl/base:raw_logging_internal",
-        "//absl/time/internal/cctz:time_zone",
-        "@com_google_googletest//:gtest",
     ],
 )
 
@@ -88,6 +82,8 @@
         "time_zone_test.cc",
     ],
     copts = ABSL_TEST_COPTS,
+    data = ["//absl/time/internal/cctz:zoneinfo"],
+    env = {"TZDIR": "absl/time/internal/cctz/testdata/zoneinfo"},
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":test_util",
@@ -95,12 +91,37 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/numeric:int128",
+        "//absl/strings:str_format",
         "//absl/time/internal/cctz:time_zone",
         "@com_google_googletest//:gtest_main",
     ],
 )
 
 cc_test(
+    name = "flag_test",
+    srcs = [
+        "flag_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_android_arm",
+        "no_test_android_arm64",
+        "no_test_android_x86",
+        "no_test_ios_x86_64",
+        "no_test_lexan",
+        "no_test_loonix",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":time",
+        "//absl/flags:flag",
+        "//absl/flags:reflection",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "time_benchmark",
     srcs = [
         "civil_time_benchmark.cc",
@@ -110,6 +131,8 @@
         "time_benchmark.cc",
     ],
     copts = ABSL_TEST_COPTS,
+    data = ["//absl/time/internal/cctz:zoneinfo"],
+    env = {"TZDIR": "absl/time/internal/cctz/testdata/zoneinfo"},
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = [
         "benchmark",
@@ -119,6 +142,7 @@
         ":time",
         "//absl/base",
         "//absl/base:core_headers",
+        "//absl/flags:flag",
         "//absl/hash",
         "@com_github_google_benchmark//:benchmark_main",
     ],
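
The test targets above now depend on the bundled //absl/time/internal/cctz:zoneinfo data and point TZDIR at it, replacing the old generated internal/zoneinfo.inc, so zone lookups no longer rely on the host's /usr/share/zoneinfo. A short sketch of the kind of lookup this enables; the zone name and date are illustrative:

    #include "absl/time/civil_time.h"
    #include "absl/time/time.h"

    // With TZDIR pointing at the bundled testdata, LoadTimeZone() resolves the
    // name from that directory instead of the host system's zoneinfo.
    absl::Time NoonNewYorkJan15() {
      absl::TimeZone tz;
      if (!absl::LoadTimeZone("America/New_York", &tz)) {
        tz = absl::UTCTimeZone();  // fall back if the zone data is unavailable
      }
      return absl::FromCivil(absl::CivilSecond(2024, 1, 15, 12, 0, 0), tz);
    }
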
diff --git a/abseil-cpp/absl/time/CMakeLists.txt b/abseil-cpp/absl/time/CMakeLists.txt
index 00bdd49..e1ade7a 100644
--- a/abseil-cpp/absl/time/CMakeLists.txt
+++ b/abseil-cpp/absl/time/CMakeLists.txt
@@ -54,10 +54,6 @@
     ${ABSL_DEFAULT_COPTS}
 )
 
-if(APPLE)
-  find_library(CoreFoundation CoreFoundation)
-endif()
-
 absl_cc_library(
   NAME
     time_zone
@@ -84,9 +80,13 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
-    $<$<PLATFORM_ID:Darwin>:${CoreFoundation}>
+    Threads::Threads
+    # TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
+    # minimum CMake version is >= 3.24
+    $<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     time_internal_test_util
@@ -94,7 +94,6 @@
     "internal/test_util.h"
   SRCS
     "internal/test_util.cc"
-    "internal/zoneinfo.inc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
@@ -102,7 +101,6 @@
     absl::config
     absl::raw_logging_internal
     absl::time_zone
-    gmock
   TESTONLY
 )
 
@@ -123,6 +121,21 @@
     absl::time
     absl::config
     absl::core_headers
+    absl::strings
+    absl::str_format
     absl::time_zone
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    flag_test
+  SRCS
+    "flag_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::flags
+    absl::flags_reflection
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/time/civil_time.cc b/abseil-cpp/absl/time/civil_time.cc
index bdfe9ce..65df39d 100644
--- a/abseil-cpp/absl/time/civil_time.cc
+++ b/abseil-cpp/absl/time/civil_time.cc
@@ -15,6 +15,7 @@
 #include "absl/time/civil_time.h"
 
 #include <cstdlib>
+#include <ostream>
 #include <string>
 
 #include "absl/strings/str_cat.h"
@@ -38,9 +39,7 @@
   const CivilSecond ncs(NormalizeYear(cs.year()), cs.month(), cs.day(),
                         cs.hour(), cs.minute(), cs.second());
   const TimeZone utc = UTCTimeZone();
-  // TODO(absl-team): Avoid conversion of fmt string.
-  return StrCat(cs.year(),
-                FormatTime(std::string(fmt), FromCivil(ncs, utc), utc));
+  return StrCat(cs.year(), FormatTime(fmt, FromCivil(ncs, utc), utc));
 }
 
 template <typename CivilT>
@@ -169,6 +168,31 @@
   return os << FormatCivilTime(s);
 }
 
+bool AbslParseFlag(string_view s, CivilSecond* c, std::string*) {
+  return ParseLenientCivilTime(s, c);
+}
+bool AbslParseFlag(string_view s, CivilMinute* c, std::string*) {
+  return ParseLenientCivilTime(s, c);
+}
+bool AbslParseFlag(string_view s, CivilHour* c, std::string*) {
+  return ParseLenientCivilTime(s, c);
+}
+bool AbslParseFlag(string_view s, CivilDay* c, std::string*) {
+  return ParseLenientCivilTime(s, c);
+}
+bool AbslParseFlag(string_view s, CivilMonth* c, std::string*) {
+  return ParseLenientCivilTime(s, c);
+}
+bool AbslParseFlag(string_view s, CivilYear* c, std::string*) {
+  return ParseLenientCivilTime(s, c);
+}
+std::string AbslUnparseFlag(CivilSecond c) { return FormatCivilTime(c); }
+std::string AbslUnparseFlag(CivilMinute c) { return FormatCivilTime(c); }
+std::string AbslUnparseFlag(CivilHour c) { return FormatCivilTime(c); }
+std::string AbslUnparseFlag(CivilDay c) { return FormatCivilTime(c); }
+std::string AbslUnparseFlag(CivilMonth c) { return FormatCivilTime(c); }
+std::string AbslUnparseFlag(CivilYear c) { return FormatCivilTime(c); }
+
 }  // namespace time_internal
 
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/time/civil_time.h b/abseil-cpp/absl/time/civil_time.h
index bb46004..5855bc7 100644
--- a/abseil-cpp/absl/time/civil_time.h
+++ b/abseil-cpp/absl/time/civil_time.h
@@ -70,8 +70,10 @@
 #ifndef ABSL_TIME_CIVIL_TIME_H_
 #define ABSL_TIME_CIVIL_TIME_H_
 
+#include <iosfwd>
 #include <string>
 
+#include "absl/base/config.h"
 #include "absl/strings/string_view.h"
 #include "absl/time/internal/cctz/include/cctz/civil_time.h"
 
@@ -530,6 +532,29 @@
 std::ostream& operator<<(std::ostream& os, CivilMinute m);
 std::ostream& operator<<(std::ostream& os, CivilSecond s);
 
+// AbslParseFlag()
+//
+// Parses the command-line flag string representation `s` into a civil-time
+// value. Flags must be specified in a format that is valid for
+// `absl::ParseLenientCivilTime()`.
+bool AbslParseFlag(absl::string_view s, CivilSecond* c, std::string* error);
+bool AbslParseFlag(absl::string_view s, CivilMinute* c, std::string* error);
+bool AbslParseFlag(absl::string_view s, CivilHour* c, std::string* error);
+bool AbslParseFlag(absl::string_view s, CivilDay* c, std::string* error);
+bool AbslParseFlag(absl::string_view s, CivilMonth* c, std::string* error);
+bool AbslParseFlag(absl::string_view s, CivilYear* c, std::string* error);
+
+// AbslUnparseFlag()
+//
+// Unparses a civil-time value into a command-line string representation using
+// the format specified by `absl::ParseCivilTime()`.
+std::string AbslUnparseFlag(CivilSecond c);
+std::string AbslUnparseFlag(CivilMinute c);
+std::string AbslUnparseFlag(CivilHour c);
+std::string AbslUnparseFlag(CivilDay c);
+std::string AbslUnparseFlag(CivilMonth c);
+std::string AbslUnparseFlag(CivilYear c);
+
 }  // namespace time_internal
 
 ABSL_NAMESPACE_END
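
The AbslParseFlag()/AbslUnparseFlag() overloads declared above are what make the civil-time types usable directly as Abseil flag types (parsing with ParseLenientCivilTime(), unparsing with FormatCivilTime()). A minimal sketch; the flag name and default are illustrative:

    #include "absl/flags/flag.h"
    #include "absl/time/civil_time.h"

    // Accepts values such as "2024-01-01" on the command line.
    ABSL_FLAG(absl::CivilDay, start_day, absl::CivilDay(2024, 1, 1),
              "First day to process");

    absl::CivilDay StartDay() { return absl::GetFlag(FLAGS_start_day); }
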
diff --git a/abseil-cpp/absl/time/civil_time_test.cc b/abseil-cpp/absl/time/civil_time_test.cc
index 0ebd97a..ec435ac 100644
--- a/abseil-cpp/absl/time/civil_time_test.cc
+++ b/abseil-cpp/absl/time/civil_time_test.cc
@@ -1228,7 +1228,7 @@
   EXPECT_EQ(0, day_floor.hour());  // 09:09:09 is floored
   EXPECT_EQ(absl::CivilDay(2015, 1, 2), day_floor);
 
-  // Unspecified fields default to their minium value
+  // Unspecified fields default to their minimum value
   absl::CivilDay day_default(2015);  // Defaults to Jan 1
   EXPECT_EQ(absl::CivilDay(2015, 1, 1), day_default);
 
diff --git a/abseil-cpp/absl/time/clock.cc b/abseil-cpp/absl/time/clock.cc
index e5c423c..aa74367 100644
--- a/abseil-cpp/absl/time/clock.cc
+++ b/abseil-cpp/absl/time/clock.cc
@@ -15,6 +15,7 @@
 #include "absl/time/clock.h"
 
 #include "absl/base/attributes.h"
+#include "absl/base/optimization.h"
 
 #ifdef _WIN32
 #include <windows.h>
@@ -47,17 +48,16 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-// Decide if we should use the fast GetCurrentTimeNanos() algorithm
-// based on the cyclecounter, otherwise just get the time directly
-// from the OS on every call. This can be chosen at compile-time via
+// Decide whether to use the fast GetCurrentTimeNanos() algorithm based on the
+// cyclecounter, or to get the time directly from the OS on every call.
+// By default, the fast cyclecounter-based algorithm is disabled because in
+// certain situations, for example, if the OS enters a "sleep" mode, it may
+// produce incorrect values immediately upon waking.
+// This can be chosen at compile-time via
+// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
 #ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
-#if ABSL_USE_UNSCALED_CYCLECLOCK
-#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
-#else
 #define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
 #endif
-#endif
 
 #if defined(__APPLE__) || defined(_WIN32)
 #include "absl/time/internal/get_current_time_chrono.inc"
@@ -74,9 +74,7 @@
 #if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-int64_t GetCurrentTimeNanos() {
-  return GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
-}
+int64_t GetCurrentTimeNanos() { return GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); }
 ABSL_NAMESPACE_END
 }  // namespace absl
 #else  // Use the cyclecounter-based implementation below.
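
The rewritten comment above makes the cyclecounter fast path opt-in via -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=1; in either configuration the public entry point is unchanged. A small usage sketch:

    #include <cstdint>

    #include "absl/time/clock.h"
    #include "absl/time/time.h"

    // Which implementation backs GetCurrentTimeNanos() is a compile-time choice;
    // callers get nanoseconds since the Unix epoch either way.
    int64_t ElapsedNanosSince(int64_t start_unix_ns) {
      return absl::GetCurrentTimeNanos() - start_unix_ns;
    }

    absl::Time NowFromNanos() {
      return absl::FromUnixNanos(absl::GetCurrentTimeNanos());
    }
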
@@ -87,13 +85,6 @@
   ::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
 #endif
 
-// The following counters are used only by the test code.
-static int64_t stats_initializations;
-static int64_t stats_reinitializations;
-static int64_t stats_calibrations;
-static int64_t stats_slow_paths;
-static int64_t stats_fast_slow_paths;
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace time_internal {
@@ -107,72 +98,6 @@
 
 // uint64_t is used in this module to provide an extra bit in multiplications
 
-// Return the time in ns as told by the kernel interface.  Place in *cycleclock
-// the value of the cycleclock at about the time of the syscall.
-// This call represents the time base that this module synchronizes to.
-// Ensures that *cycleclock does not step back by up to (1 << 16) from
-// last_cycleclock, to discard small backward counter steps.  (Larger steps are
-// assumed to be complete resyncs, which shouldn't happen.  If they do, a full
-// reinitialization of the outer algorithm should occur.)
-static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
-                                             uint64_t *cycleclock) {
-  // We try to read clock values at about the same time as the kernel clock.
-  // This value gets adjusted up or down as estimate of how long that should
-  // take, so we can reject attempts that take unusually long.
-  static std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
-
-  uint64_t local_approx_syscall_time_in_cycles =  // local copy
-      approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
-
-  int64_t current_time_nanos_from_system;
-  uint64_t before_cycles;
-  uint64_t after_cycles;
-  uint64_t elapsed_cycles;
-  int loops = 0;
-  do {
-    before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
-    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
-    after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
-    // elapsed_cycles is unsigned, so is large on overflow
-    elapsed_cycles = after_cycles - before_cycles;
-    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
-        ++loops == 20) {  // clock changed frequencies?  Back off.
-      loops = 0;
-      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
-        local_approx_syscall_time_in_cycles =
-            (local_approx_syscall_time_in_cycles + 1) << 1;
-      }
-      approx_syscall_time_in_cycles.store(
-          local_approx_syscall_time_in_cycles,
-          std::memory_order_relaxed);
-    }
-  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
-           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
-
-  // Number of times in a row we've seen a kernel time call take substantially
-  // less than approx_syscall_time_in_cycles.
-  static std::atomic<uint32_t> seen_smaller{ 0 };
-
-  // Adjust approx_syscall_time_in_cycles to be within a factor of 2
-  // of the typical time to execute one iteration of the loop above.
-  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
-    // measured time is no smaller than half current approximation
-    seen_smaller.store(0, std::memory_order_relaxed);
-  } else if (seen_smaller.fetch_add(1, std::memory_order_relaxed) >= 3) {
-    // smaller delays several times in a row; reduce approximation by 12.5%
-    const uint64_t new_approximation =
-        local_approx_syscall_time_in_cycles -
-        (local_approx_syscall_time_in_cycles >> 3);
-    approx_syscall_time_in_cycles.store(new_approximation,
-                                        std::memory_order_relaxed);
-    seen_smaller.store(0, std::memory_order_relaxed);
-  }
-
-  *cycleclock = after_cycles;
-  return current_time_nanos_from_system;
-}
-
-
 // ---------------------------------------------------------------------
 // An implementation of reader-write locks that use no atomic ops in the read
 // case.  This is a generalization of Lamport's method for reading a multiword
@@ -224,32 +149,112 @@
                kMinNSBetweenSamples,
                "cannot represent kMaxBetweenSamplesNSScaled");
 
-// A reader-writer lock protecting the static locations below.
-// See SeqAcquire() and SeqRelease() above.
-ABSL_CONST_INIT static absl::base_internal::SpinLock lock(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static std::atomic<uint64_t> seq(0);
-
 // data from a sample of the kernel's time value
 struct TimeSampleAtomic {
-  std::atomic<uint64_t> raw_ns;              // raw kernel time
-  std::atomic<uint64_t> base_ns;             // our estimate of time
-  std::atomic<uint64_t> base_cycles;         // cycle counter reading
-  std::atomic<uint64_t> nsscaled_per_cycle;  // cycle period
+  std::atomic<uint64_t> raw_ns{0};              // raw kernel time
+  std::atomic<uint64_t> base_ns{0};             // our estimate of time
+  std::atomic<uint64_t> base_cycles{0};         // cycle counter reading
+  std::atomic<uint64_t> nsscaled_per_cycle{0};  // cycle period
   // cycles before we'll sample again (a scaled reciprocal of the period,
   // to avoid a division on the fast path).
-  std::atomic<uint64_t> min_cycles_per_sample;
+  std::atomic<uint64_t> min_cycles_per_sample{0};
 };
 // Same again, but with non-atomic types
 struct TimeSample {
-  uint64_t raw_ns;                 // raw kernel time
-  uint64_t base_ns;                // our estimate of time
-  uint64_t base_cycles;            // cycle counter reading
-  uint64_t nsscaled_per_cycle;     // cycle period
-  uint64_t min_cycles_per_sample;  // approx cycles before next sample
+  uint64_t raw_ns = 0;                 // raw kernel time
+  uint64_t base_ns = 0;                // our estimate of time
+  uint64_t base_cycles = 0;            // cycle counter reading
+  uint64_t nsscaled_per_cycle = 0;     // cycle period
+  uint64_t min_cycles_per_sample = 0;  // approx cycles before next sample
 };
 
-static struct TimeSampleAtomic last_sample;   // the last sample; under seq
+struct ABSL_CACHELINE_ALIGNED TimeState {
+  std::atomic<uint64_t> seq{0};
+  TimeSampleAtomic last_sample;  // the last sample; under seq
+
+  // The following counters are used only by the test code.
+  int64_t stats_initializations{0};
+  int64_t stats_reinitializations{0};
+  int64_t stats_calibrations{0};
+  int64_t stats_slow_paths{0};
+  int64_t stats_fast_slow_paths{0};
+
+  uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};
+
+  // Used by GetCurrentTimeNanosFromKernel().
+  // We try to read clock values at about the same time as the kernel clock.
+  // This value gets adjusted up or down as an estimate of how long that
+  // should take, so we can reject attempts that take unusually long.
+  std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
+  // Number of times in a row we've seen a kernel time call take substantially
+  // less than approx_syscall_time_in_cycles.
+  std::atomic<uint32_t> kernel_time_seen_smaller{0};
+
+  // A reader-writer lock protecting the static locations below.
+  // See SeqAcquire() and SeqRelease() above.
+  absl::base_internal::SpinLock lock{absl::kConstInit,
+                                     base_internal::SCHEDULE_KERNEL_ONLY};
+};
+ABSL_CONST_INIT static TimeState time_state;
+
+// Return the time in ns as told by the kernel interface.  Place in *cycleclock
+// the value of the cycleclock at about the time of the syscall.
+// This call represents the time base that this module synchronizes to.
+// Ensures that *cycleclock does not step back by up to (1 << 16) from
+// last_cycleclock, to discard small backward counter steps.  (Larger steps are
+// assumed to be complete resyncs, which shouldn't happen.  If they do, a full
+// reinitialization of the outer algorithm should occur.)
+static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
+                                             uint64_t *cycleclock)
+    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
+  uint64_t local_approx_syscall_time_in_cycles =  // local copy
+      time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
+
+  int64_t current_time_nanos_from_system;
+  uint64_t before_cycles;
+  uint64_t after_cycles;
+  uint64_t elapsed_cycles;
+  int loops = 0;
+  do {
+    before_cycles =
+        static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
+    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
+    after_cycles =
+        static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
+    // elapsed_cycles is unsigned, so it is large on overflow
+    elapsed_cycles = after_cycles - before_cycles;
+    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
+        ++loops == 20) {  // clock changed frequencies?  Back off.
+      loops = 0;
+      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
+        local_approx_syscall_time_in_cycles =
+            (local_approx_syscall_time_in_cycles + 1) << 1;
+      }
+      time_state.approx_syscall_time_in_cycles.store(
+          local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
+    }
+  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
+           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
+
+  // Adjust approx_syscall_time_in_cycles to be within a factor of 2
+  // of the typical time to execute one iteration of the loop above.
+  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
+    // measured time is no smaller than half current approximation
+    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
+  } else if (time_state.kernel_time_seen_smaller.fetch_add(
+                 1, std::memory_order_relaxed) >= 3) {
+    // smaller delays several times in a row; reduce approximation by 12.5%
+    const uint64_t new_approximation =
+        local_approx_syscall_time_in_cycles -
+        (local_approx_syscall_time_in_cycles >> 3);
+    time_state.approx_syscall_time_in_cycles.store(new_approximation,
+                                                   std::memory_order_relaxed);
+    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
+  }
+
+  *cycleclock = after_cycles;
+  return current_time_nanos_from_system;
+}
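
The function above keeps a self-tuning estimate of how many cycles a bracketed kernel-clock read should take: it doubles the estimate when 20 consecutive attempts exceed it, and shrinks it by 12.5% after several attempts come in at less than half of it. Below is a standalone sketch of just that adaptive threshold, with made-up names (read_cycles and read_kernel_ns stand in for the cycle-counter and kernel-clock macros used above); it is illustrative only, not the patched code.

#include <atomic>
#include <cstdint>

static std::atomic<std::uint64_t> g_budget_cycles{10 * 1000};
static std::atomic<std::uint32_t> g_seen_faster{0};

// Returns kernel nanoseconds, retrying until the read is bracketed by two
// cycle-counter reads that are closer together than the current budget.
std::int64_t BracketedKernelNs(std::uint64_t (*read_cycles)(),
                               std::int64_t (*read_kernel_ns)(),
                               std::uint64_t* cycles_at_read) {
  std::uint64_t budget = g_budget_cycles.load(std::memory_order_relaxed);
  std::uint64_t elapsed;
  std::int64_t ns;
  int slow_attempts = 0;
  do {
    const std::uint64_t before = read_cycles();
    ns = read_kernel_ns();
    *cycles_at_read = read_cycles();
    elapsed = *cycles_at_read - before;
    if (elapsed >= budget && ++slow_attempts == 20) {
      slow_attempts = 0;                 // consistently slow: relax the budget
      budget = (budget + 1) << 1;
      g_budget_cycles.store(budget, std::memory_order_relaxed);
    }
  } while (elapsed >= budget);

  if ((budget >> 1) < elapsed) {
    g_seen_faster.store(0, std::memory_order_relaxed);  // not unusually fast
  } else if (g_seen_faster.fetch_add(1, std::memory_order_relaxed) >= 3) {
    g_budget_cycles.store(budget - (budget >> 3),       // tighten by 12.5%
                          std::memory_order_relaxed);
    g_seen_faster.store(0, std::memory_order_relaxed);
  }
  return ns;
}
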
 
 static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;
 
@@ -312,19 +317,21 @@
   // contribute to register pressure - reading it early before initializing
   // the other pieces of the calculation minimizes spill/restore instructions,
   // minimizing icache cost.
-  uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+  uint64_t now_cycles =
+      static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
 
   // Acquire pairs with the barrier in SeqRelease - if this load sees that
   // store, the shared-data reads necessarily see that SeqRelease's updates
   // to the same shared data.
-  seq_read0 = seq.load(std::memory_order_acquire);
+  seq_read0 = time_state.seq.load(std::memory_order_acquire);
 
-  base_ns = last_sample.base_ns.load(std::memory_order_relaxed);
-  base_cycles = last_sample.base_cycles.load(std::memory_order_relaxed);
+  base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
+  base_cycles =
+      time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
   nsscaled_per_cycle =
-      last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
-  min_cycles_per_sample =
-      last_sample.min_cycles_per_sample.load(std::memory_order_relaxed);
+      time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
+  min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
+      std::memory_order_relaxed);
 
   // This acquire fence pairs with the release fence in SeqAcquire.  Since it
   // is sequenced between reads of shared data and seq_read1, the reads of
@@ -335,7 +342,7 @@
   // shared-data writes are effectively release ordered. Therefore if our
   // shared-data reads see any of a particular update's shared-data writes,
   // seq_read1 is guaranteed to see that update's SeqAcquire.
-  seq_read1 = seq.load(std::memory_order_relaxed);
+  seq_read1 = time_state.seq.load(std::memory_order_relaxed);
 
   // Fast path.  Return if min_cycles_per_sample has not yet elapsed since the
   // last sample, and we read a consistent sample.  The fast path activates
@@ -348,10 +355,11 @@
   // last_sample was updated). This is harmless, because delta_cycles will wrap
   // and report a time much much bigger than min_cycles_per_sample. In that case
   // we will take the slow path.
-  uint64_t delta_cycles = now_cycles - base_cycles;
+  uint64_t delta_cycles;
   if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
-      delta_cycles < min_cycles_per_sample) {
-    return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
+      (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
+    return static_cast<int64_t>(
+        base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
   }
   return GetCurrentTimeNanosSlowPath();
 }
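
The fast path above is the read side of a seqlock: load the sequence number with acquire ordering, read the shared fields, then confirm the sequence is unchanged and even before trusting them. A generic sketch of that pattern (illustrative only, not the Abseil implementation, which goes through SeqAcquire/SeqRelease):

#include <atomic>
#include <cstdint>

struct SeqLocked {
  std::atomic<std::uint64_t> seq{0};
  std::atomic<std::uint64_t> a{0}, b{0};  // the protected fields
};

// Writer: assumed to run under an external mutex so writers do not race.
void Publish(SeqLocked& s, std::uint64_t new_a, std::uint64_t new_b) {
  const std::uint64_t seq = s.seq.load(std::memory_order_relaxed);
  s.seq.store(seq + 1, std::memory_order_relaxed);      // odd: update in progress
  std::atomic_thread_fence(std::memory_order_release);
  s.a.store(new_a, std::memory_order_relaxed);
  s.b.store(new_b, std::memory_order_relaxed);
  s.seq.store(seq + 2, std::memory_order_release);      // even again: consistent
}

// Reader: never blocks; reports failure if it raced a writer.
bool TryRead(const SeqLocked& s, std::uint64_t* a, std::uint64_t* b) {
  const std::uint64_t seq0 = s.seq.load(std::memory_order_acquire);
  *a = s.a.load(std::memory_order_relaxed);
  *b = s.b.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);
  const std::uint64_t seq1 = s.seq.load(std::memory_order_relaxed);
  return seq0 == seq1 && (seq0 & 1) == 0;  // unchanged and not mid-update
}
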
@@ -390,24 +398,25 @@
 // TODO(absl-team): Remove this attribute when our compiler is smart enough
 // to do the right thing.
 ABSL_ATTRIBUTE_NOINLINE
-static int64_t GetCurrentTimeNanosSlowPath() ABSL_LOCKS_EXCLUDED(lock) {
+static int64_t GetCurrentTimeNanosSlowPath()
+    ABSL_LOCKS_EXCLUDED(time_state.lock) {
   // Serialize access to slow-path.  Fast-path readers are not blocked yet, and
   // code below must not modify last_sample until the seqlock is acquired.
-  lock.Lock();
+  time_state.lock.Lock();
 
   // Sample the kernel time base.  This is the definition of
   // "now" if we take the slow path.
-  static uint64_t last_now_cycles;  // protected by lock
   uint64_t now_cycles;
-  uint64_t now_ns = GetCurrentTimeNanosFromKernel(last_now_cycles, &now_cycles);
-  last_now_cycles = now_cycles;
+  uint64_t now_ns = static_cast<uint64_t>(
+      GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles));
+  time_state.last_now_cycles = now_cycles;
 
   uint64_t estimated_base_ns;
 
   // ----------
   // Read the "last_sample" values again; this time holding the write lock.
   struct TimeSample sample;
-  ReadTimeSampleAtomic(&last_sample, &sample);
+  ReadTimeSampleAtomic(&time_state.last_sample, &sample);
 
   // ----------
   // Try running the fast path again; another thread may have updated the
@@ -418,15 +427,15 @@
     // so that blocked readers can make progress without blocking new readers.
     estimated_base_ns = sample.base_ns +
         ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
-    stats_fast_slow_paths++;
+    time_state.stats_fast_slow_paths++;
   } else {
     estimated_base_ns =
         UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
   }
 
-  lock.Unlock();
+  time_state.lock.Unlock();
 
-  return estimated_base_ns;
+  return static_cast<int64_t>(estimated_base_ns);
 }
 
 // Main part of the algorithm.  Locks out readers, updates the approximation
@@ -435,9 +444,10 @@
 static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
                                  uint64_t delta_cycles,
                                  const struct TimeSample *sample)
-    ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock) {
+    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
   uint64_t estimated_base_ns = now_ns;
-  uint64_t lock_value = SeqAcquire(&seq);  // acquire seqlock to block readers
+  uint64_t lock_value =
+      SeqAcquire(&time_state.seq);  // acquire seqlock to block readers
 
   // The 5s in the next if-statement limits the time for which we will trust
   // the cycle counter and our last sample to give a reasonable result.
@@ -447,12 +457,16 @@
       sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
       now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
     // record this sample, and forget any previously known slope.
-    last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
-    last_sample.base_ns.store(estimated_base_ns, std::memory_order_relaxed);
-    last_sample.base_cycles.store(now_cycles, std::memory_order_relaxed);
-    last_sample.nsscaled_per_cycle.store(0, std::memory_order_relaxed);
-    last_sample.min_cycles_per_sample.store(0, std::memory_order_relaxed);
-    stats_initializations++;
+    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+    time_state.last_sample.base_ns.store(estimated_base_ns,
+                                         std::memory_order_relaxed);
+    time_state.last_sample.base_cycles.store(now_cycles,
+                                             std::memory_order_relaxed);
+    time_state.last_sample.nsscaled_per_cycle.store(0,
+                                                    std::memory_order_relaxed);
+    time_state.last_sample.min_cycles_per_sample.store(
+        0, std::memory_order_relaxed);
+    time_state.stats_initializations++;
   } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
              sample->base_cycles + 50 < now_cycles) {
     // Enough time has passed to compute the cycle time.
@@ -478,7 +492,8 @@
     uint64_t assumed_next_sample_delta_cycles =
         SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);
 
-    int64_t diff_ns = now_ns - estimated_base_ns;  // estimate low by this much
+    // Estimate low by this much.
+    int64_t diff_ns = static_cast<int64_t>(now_ns - estimated_base_ns);
 
     // We want to set nsscaled_per_cycle so that our estimate of the ns time
     // at the assumed cycle time is the assumed ns time.
@@ -489,34 +504,39 @@
     // of our current error, by solving:
     //  kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
     //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
-    ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
+    ns = static_cast<uint64_t>(static_cast<int64_t>(kMinNSBetweenSamples) +
+                               diff_ns - (diff_ns / 16));
     uint64_t new_nsscaled_per_cycle =
         SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
     if (new_nsscaled_per_cycle != 0 &&
         diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
       // record the cycle time measurement
-      last_sample.nsscaled_per_cycle.store(
+      time_state.last_sample.nsscaled_per_cycle.store(
           new_nsscaled_per_cycle, std::memory_order_relaxed);
       uint64_t new_min_cycles_per_sample =
           SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
-      last_sample.min_cycles_per_sample.store(
+      time_state.last_sample.min_cycles_per_sample.store(
           new_min_cycles_per_sample, std::memory_order_relaxed);
-      stats_calibrations++;
+      time_state.stats_calibrations++;
     } else {  // something went wrong; forget the slope
-      last_sample.nsscaled_per_cycle.store(0, std::memory_order_relaxed);
-      last_sample.min_cycles_per_sample.store(0, std::memory_order_relaxed);
+      time_state.last_sample.nsscaled_per_cycle.store(
+          0, std::memory_order_relaxed);
+      time_state.last_sample.min_cycles_per_sample.store(
+          0, std::memory_order_relaxed);
       estimated_base_ns = now_ns;
-      stats_reinitializations++;
+      time_state.stats_reinitializations++;
     }
-    last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
-    last_sample.base_ns.store(estimated_base_ns, std::memory_order_relaxed);
-    last_sample.base_cycles.store(now_cycles, std::memory_order_relaxed);
+    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+    time_state.last_sample.base_ns.store(estimated_base_ns,
+                                         std::memory_order_relaxed);
+    time_state.last_sample.base_cycles.store(now_cycles,
+                                             std::memory_order_relaxed);
   } else {
     // have a sample, but no slope; waiting for enough time for a calibration
-    stats_slow_paths++;
+    time_state.stats_slow_paths++;
   }
 
-  SeqRelease(&seq, lock_value);  // release the readers
+  SeqRelease(&time_state.seq, lock_value);  // release the readers
 
   return estimated_base_ns;
 }
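
The calibration above stores the cycle period as a fixed-point value, roughly (ns << kScale) / cycles, so the fast path can convert a cycle delta to nanoseconds with one multiply and one shift instead of a division. A small self-contained illustration of that arithmetic; the shift of 30 bits is this sketch's choice, and the overflow guards of SafeDivideAndScale are omitted.

#include <cstdint>
#include <iostream>

constexpr int kShift = 30;

std::uint64_t NsScaledPerCycle(std::uint64_t measured_ns,
                               std::uint64_t measured_cycles) {
  return (measured_ns << kShift) / measured_cycles;  // fixed-point ns per cycle
}

std::uint64_t EstimateNs(std::uint64_t base_ns, std::uint64_t delta_cycles,
                         std::uint64_t nsscaled_per_cycle) {
  return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kShift);
}

int main() {
  // Suppose 500ms of kernel time elapsed over 1.5e9 cycles (a 3 GHz counter).
  const std::uint64_t per_cycle = NsScaledPerCycle(500'000'000, 1'500'000'000);
  // 3e6 cycles later, the estimate is about 1ms past the base time.
  std::cout << EstimateNs(0, 3'000'000, per_cycle) << " ns\n";  // ~1000000
}
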
@@ -543,7 +563,7 @@
 // REQUIRES: to_sleep <= MaxSleep().
 void SleepOnce(absl::Duration to_sleep) {
 #ifdef _WIN32
-  Sleep(to_sleep / absl::Milliseconds(1));
+  Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
 #else
   struct timespec sleep_time = absl::ToTimespec(to_sleep);
   while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
@@ -558,7 +578,8 @@
 
 extern "C" {
 
-ABSL_ATTRIBUTE_WEAK void AbslInternalSleepFor(absl::Duration duration) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
+    absl::Duration duration) {
   while (duration > absl::ZeroDuration()) {
     absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
     absl::SleepOnce(to_sleep);
diff --git a/abseil-cpp/absl/time/clock.h b/abseil-cpp/absl/time/clock.h
index 27764a9..5fe244d 100644
--- a/abseil-cpp/absl/time/clock.h
+++ b/abseil-cpp/absl/time/clock.h
@@ -64,11 +64,11 @@
 // By changing our extension points to be extern "C", we dodge this
 // check.
 extern "C" {
-void AbslInternalSleepFor(absl::Duration duration);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration);
 }  // extern "C"
 
 inline void absl::SleepFor(absl::Duration duration) {
-  AbslInternalSleepFor(duration);
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration);
 }
 
 #endif  // ABSL_TIME_CLOCK_H_
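
The extern "C" symbol above is only the extension point; callers use absl::SleepFor with an absl::Duration. A minimal usage sketch:

#include <iostream>
#include "absl/time/clock.h"
#include "absl/time/time.h"

int main() {
  const absl::Time start = absl::Now();
  absl::SleepFor(absl::Milliseconds(250));  // forwards to AbslInternalSleepFor
  std::cout << "slept for about "
            << absl::ToDoubleMilliseconds(absl::Now() - start) << " ms\n";
}
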
diff --git a/abseil-cpp/absl/time/clock_test.cc b/abseil-cpp/absl/time/clock_test.cc
index 4bcfc6b..bc77dbc 100644
--- a/abseil-cpp/absl/time/clock_test.cc
+++ b/abseil-cpp/absl/time/clock_test.cc
@@ -18,6 +18,10 @@
 #if defined(ABSL_HAVE_ALARM)
 #include <signal.h>
 #include <unistd.h>
+#ifdef _AIX
+// sig_t is not defined in AIX.
+typedef void (*sig_t)(int);
+#endif
 #elif defined(__linux__) || defined(__APPLE__)
 #error all known Linux and Apple targets have alarm
 #endif
diff --git a/abseil-cpp/absl/time/duration.cc b/abseil-cpp/absl/time/duration.cc
index 952cc09..634e5d5 100644
--- a/abseil-cpp/absl/time/duration.cc
+++ b/abseil-cpp/absl/time/duration.cc
@@ -96,13 +96,6 @@
   return d != 0.0;
 }
 
-// Can't use std::round() because it is only available in C++11.
-// Note that we ignore the possibility of floating-point over/underflow.
-template <typename Double>
-inline double Round(Double d) {
-  return d < 0 ? std::ceil(d - 0.5) : std::floor(d + 0.5);
-}
-
 // *sec may be positive or negative.  *ticks must be in the range
 // -kTicksPerSecond < *ticks < kTicksPerSecond.  If *ticks is negative it
 // will be normalized to a positive value by adjusting *sec accordingly.
@@ -260,7 +253,7 @@
   double lo_frac = std::modf(lo_doub, &lo_int);
 
   // Rolls lo into hi if necessary.
-  int64_t lo64 = Round(lo_frac * kTicksPerSecond);
+  int64_t lo64 = std::round(lo_frac * kTicksPerSecond);
 
   Duration ans;
   if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
@@ -356,7 +349,7 @@
 // the remainder.  If it does not saturate, the remainder remains accurate,
 // but the returned quotient will over/underflow int64_t and should not be used.
 int64_t IDivDuration(bool satq, const Duration num, const Duration den,
-                   Duration* rem) {
+                     Duration* rem) {
   int64_t q = 0;
   if (IDivFastPath(num, den, &q, rem)) {
     return q;
@@ -407,16 +400,18 @@
 Duration& Duration::operator+=(Duration rhs) {
   if (time_internal::IsInfiniteDuration(*this)) return *this;
   if (time_internal::IsInfiniteDuration(rhs)) return *this = rhs;
-  const int64_t orig_rep_hi = rep_hi_;
-  rep_hi_ =
-      DecodeTwosComp(EncodeTwosComp(rep_hi_) + EncodeTwosComp(rhs.rep_hi_));
+  const int64_t orig_rep_hi = rep_hi_.Get();
+  rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) +
+                           EncodeTwosComp(rhs.rep_hi_.Get()));
   if (rep_lo_ >= kTicksPerSecond - rhs.rep_lo_) {
-    rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) + 1);
+    rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) + 1);
     rep_lo_ -= kTicksPerSecond;
   }
   rep_lo_ += rhs.rep_lo_;
-  if (rhs.rep_hi_ < 0 ? rep_hi_ > orig_rep_hi : rep_hi_ < orig_rep_hi) {
-    return *this = rhs.rep_hi_ < 0 ? -InfiniteDuration() : InfiniteDuration();
+  if (rhs.rep_hi_.Get() < 0 ? rep_hi_.Get() > orig_rep_hi
+                            : rep_hi_.Get() < orig_rep_hi) {
+    return *this =
+               rhs.rep_hi_.Get() < 0 ? -InfiniteDuration() : InfiniteDuration();
   }
   return *this;
 }
@@ -424,18 +419,21 @@
 Duration& Duration::operator-=(Duration rhs) {
   if (time_internal::IsInfiniteDuration(*this)) return *this;
   if (time_internal::IsInfiniteDuration(rhs)) {
-    return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+    return *this = rhs.rep_hi_.Get() >= 0 ? -InfiniteDuration()
+                                          : InfiniteDuration();
   }
-  const int64_t orig_rep_hi = rep_hi_;
-  rep_hi_ =
-      DecodeTwosComp(EncodeTwosComp(rep_hi_) - EncodeTwosComp(rhs.rep_hi_));
+  const int64_t orig_rep_hi = rep_hi_.Get();
+  rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) -
+                           EncodeTwosComp(rhs.rep_hi_.Get()));
   if (rep_lo_ < rhs.rep_lo_) {
-    rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) - 1);
+    rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) - 1);
     rep_lo_ += kTicksPerSecond;
   }
   rep_lo_ -= rhs.rep_lo_;
-  if (rhs.rep_hi_ < 0 ? rep_hi_ < orig_rep_hi : rep_hi_ > orig_rep_hi) {
-    return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+  if (rhs.rep_hi_.Get() < 0 ? rep_hi_.Get() < orig_rep_hi
+                            : rep_hi_.Get() > orig_rep_hi) {
+    return *this = rhs.rep_hi_.Get() >= 0 ? -InfiniteDuration()
+                                          : InfiniteDuration();
   }
   return *this;
 }
@@ -446,7 +444,7 @@
 
 Duration& Duration::operator*=(int64_t r) {
   if (time_internal::IsInfiniteDuration(*this)) {
-    const bool is_neg = (r < 0) != (rep_hi_ < 0);
+    const bool is_neg = (r < 0) != (rep_hi_.Get() < 0);
     return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
   }
   return *this = ScaleFixed<SafeMultiply>(*this, r);
@@ -454,7 +452,7 @@
 
 Duration& Duration::operator*=(double r) {
   if (time_internal::IsInfiniteDuration(*this) || !IsFinite(r)) {
-    const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+    const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0);
     return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
   }
   return *this = ScaleDouble<std::multiplies>(*this, r);
@@ -462,7 +460,7 @@
 
 Duration& Duration::operator/=(int64_t r) {
   if (time_internal::IsInfiniteDuration(*this) || r == 0) {
-    const bool is_neg = (r < 0) != (rep_hi_ < 0);
+    const bool is_neg = (r < 0) != (rep_hi_.Get() < 0);
     return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
   }
   return *this = ScaleFixed<std::divides>(*this, r);
@@ -470,7 +468,7 @@
 
 Duration& Duration::operator/=(double r) {
   if (time_internal::IsInfiniteDuration(*this) || !IsValidDivisor(r)) {
-    const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+    const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0);
     return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
   }
   return *this = ScaleDouble<std::divides>(*this, r);
@@ -617,7 +615,7 @@
         rep_lo -= kTicksPerSecond;
       }
     }
-    ts.tv_sec = rep_hi;
+    ts.tv_sec = static_cast<decltype(ts.tv_sec)>(rep_hi);
     if (ts.tv_sec == rep_hi) {  // no time_t narrowing
       ts.tv_nsec = rep_lo / kTicksPerNanosecond;
       return ts;
@@ -645,7 +643,7 @@
       ts.tv_nsec -= 1000 * 1000 * 1000;
     }
   }
-  tv.tv_sec = ts.tv_sec;
+  tv.tv_sec = static_cast<decltype(tv.tv_sec)>(ts.tv_sec);
   if (tv.tv_sec != ts.tv_sec) {  // narrowing
     if (ts.tv_sec < 0) {
       tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
@@ -691,7 +689,7 @@
 char* Format64(char* ep, int width, int64_t v) {
   do {
     --width;
-    *--ep = '0' + (v % 10);  // contiguous digits
+    *--ep = static_cast<char>('0' + (v % 10));  // contiguous digits
   } while (v /= 10);
   while (--width >= 0) *--ep = '0';  // zero pad
   return ep;
@@ -728,7 +726,7 @@
   char* const ep = buf + sizeof(buf);
   char* bp = Format64(ep, 0, n);
   if (*bp != '0' || bp + 1 != ep) {
-    out->append(bp, ep - bp);
+    out->append(bp, static_cast<size_t>(ep - bp));
     out->append(unit.abbr.data(), unit.abbr.size());
   }
 }
@@ -741,16 +739,16 @@
   char buf[kBufferSize];  // also large enough to hold integer part
   char* ep = buf + sizeof(buf);
   double d = 0;
-  int64_t frac_part = Round(std::modf(n, &d) * unit.pow10);
+  int64_t frac_part = std::round(std::modf(n, &d) * unit.pow10);
   int64_t int_part = d;
   if (int_part != 0 || frac_part != 0) {
     char* bp = Format64(ep, 0, int_part);  // always < 1000
-    out->append(bp, ep - bp);
+    out->append(bp, static_cast<size_t>(ep - bp));
     if (frac_part != 0) {
       out->push_back('.');
       bp = Format64(ep, prec, frac_part);
       while (ep[-1] == '0') --ep;
-      out->append(bp, ep - bp);
+      out->append(bp, static_cast<size_t>(ep - bp));
     }
     out->append(unit.abbr.data(), unit.abbr.size());
   }
@@ -763,15 +761,17 @@
 //   form "72h3m0.5s". Leading zero units are omitted.  As a special
 //   case, durations less than one second format use a smaller unit
 //   (milli-, micro-, or nanoseconds) to ensure that the leading digit
-//   is non-zero.  The zero duration formats as 0, with no unit.
+//   is non-zero.
+// Unlike Go, we format the zero duration as 0, with no unit.
 std::string FormatDuration(Duration d) {
-  const Duration min_duration = Seconds(kint64min);
-  if (d == min_duration) {
+  constexpr Duration kMinDuration = Seconds(kint64min);
+  std::string s;
+  if (d == kMinDuration) {
     // Avoid needing to negate kint64min by directly returning what the
     // following code should produce in that case.
-    return "-2562047788015215h30m8s";
+    s = "-2562047788015215h30m8s";
+    return s;
   }
-  std::string s;
   if (d < ZeroDuration()) {
     s.append("-");
     d = -d;
@@ -839,7 +839,7 @@
 // in "*unit".  The given string pointer is modified to point to the first
 // unconsumed char.
 bool ConsumeDurationUnit(const char** start, const char* end, Duration* unit) {
-  size_t size = end - *start;
+  size_t size = static_cast<size_t>(end - *start);
   switch (size) {
     case 0:
       return false;
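
The helpers edited above (IDivDuration, Format64, FormatDuration) back the public Duration API. A short usage sketch of the observable behavior, including the zero-duration and Go-style formatting described in the comments:

#include <cstdint>
#include <iostream>
#include "absl/time/time.h"

int main() {
  absl::Duration rem;
  const std::int64_t hours =
      absl::IDivDuration(absl::Minutes(90), absl::Hours(1), &rem);
  std::cout << hours << "h with " << absl::FormatDuration(rem)
            << " left over\n";                                      // 1h with 30m left over
  std::cout << absl::FormatDuration(absl::ZeroDuration()) << "\n";  // 0 (no unit)
  std::cout << absl::FormatDuration(absl::Hours(72) + absl::Minutes(3) +
                                    absl::Milliseconds(500))
            << "\n";                                                // 72h3m0.5s
}
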
diff --git a/abseil-cpp/absl/time/duration_benchmark.cc b/abseil-cpp/absl/time/duration_benchmark.cc
index 83a836c..56820f3 100644
--- a/abseil-cpp/absl/time/duration_benchmark.cc
+++ b/abseil-cpp/absl/time/duration_benchmark.cc
@@ -18,9 +18,14 @@
 #include <string>
 
 #include "absl/base/attributes.h"
+#include "absl/flags/flag.h"
 #include "absl/time/time.h"
 #include "benchmark/benchmark.h"
 
+ABSL_FLAG(absl::Duration, absl_duration_flag_for_benchmark,
+          absl::Milliseconds(1),
+          "Flag to use for benchmarking duration flag access speed.");
+
 namespace {
 
 //
@@ -425,4 +430,15 @@
 }
 BENCHMARK(BM_Duration_ParseDuration)->DenseRange(0, kNumDurations - 1);
 
+//
+// Flag access
+//
+void BM_Duration_GetFlag(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(
+        absl::GetFlag(FLAGS_absl_duration_flag_for_benchmark));
+  }
+}
+BENCHMARK(BM_Duration_GetFlag);
+
 }  // namespace
diff --git a/abseil-cpp/absl/time/duration_test.cc b/abseil-cpp/absl/time/duration_test.cc
index 4d85a2c..dcf7aad 100644
--- a/abseil-cpp/absl/time/duration_test.cc
+++ b/abseil-cpp/absl/time/duration_test.cc
@@ -16,6 +16,8 @@
 #include <winsock2.h>  // for timeval
 #endif
 
+#include <array>
+#include <cfloat>
 #include <chrono>  // NOLINT(build/c++11)
 #include <cmath>
 #include <cstdint>
@@ -27,6 +29,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/strings/str_format.h"
 #include "absl/time/time.h"
 
 namespace {
@@ -1320,7 +1323,7 @@
 
   EXPECT_EQ(absl::ZeroDuration(), absl::Seconds(0));
   // TODO(bww): Is the next one OK?
-  EXPECT_EQ(absl::ZeroDuration(), absl::Seconds(0.124999999e-9));
+  EXPECT_EQ(absl::ZeroDuration(), absl::Seconds(std::nextafter(0.125e-9, 0)));
   EXPECT_EQ(absl::Nanoseconds(1) / 4, absl::Seconds(0.125e-9));
   EXPECT_EQ(absl::Nanoseconds(1) / 4, absl::Seconds(0.250e-9));
   EXPECT_EQ(absl::Nanoseconds(1) / 2, absl::Seconds(0.375e-9));
@@ -1330,7 +1333,7 @@
   EXPECT_EQ(absl::Nanoseconds(1), absl::Seconds(0.875e-9));
   EXPECT_EQ(absl::Nanoseconds(1), absl::Seconds(1.000e-9));
 
-  EXPECT_EQ(absl::ZeroDuration(), absl::Seconds(-0.124999999e-9));
+  EXPECT_EQ(absl::ZeroDuration(), absl::Seconds(std::nextafter(-0.125e-9, 0)));
   EXPECT_EQ(-absl::Nanoseconds(1) / 4, absl::Seconds(-0.125e-9));
   EXPECT_EQ(-absl::Nanoseconds(1) / 4, absl::Seconds(-0.250e-9));
   EXPECT_EQ(-absl::Nanoseconds(1) / 2, absl::Seconds(-0.375e-9));
@@ -1369,10 +1372,13 @@
   EXPECT_THAT(ToTimeval(absl::Nanoseconds(2000)), TimevalMatcher(tv));
 }
 
-void VerifySameAsMul(double time_as_seconds, int* const misses) {
+void VerifyApproxSameAsMul(double time_as_seconds, int* const misses) {
   auto direct_seconds = absl::Seconds(time_as_seconds);
   auto mul_by_one_second = time_as_seconds * absl::Seconds(1);
-  if (direct_seconds != mul_by_one_second) {
+  // These are expected to differ by up to one tick due to fused multiply/add
+  // contraction.
+  if (absl::AbsDuration(direct_seconds - mul_by_one_second) >
+      absl::time_internal::MakeDuration(0, 1u)) {
     if (*misses > 10) return;
     ASSERT_LE(++(*misses), 10) << "Too many errors, not reporting more.";
     EXPECT_EQ(direct_seconds, mul_by_one_second)
@@ -1384,8 +1390,17 @@
 // For a variety of interesting durations, we find the exact point
 // where one double converts to that duration, and the very next double
 // converts to the next duration.  For both of those points, verify that
-// Seconds(point) returns the same duration as point * Seconds(1.0)
+// Seconds(point) returns a duration near point * Seconds(1.0). (They may
+// not be exactly equal due to fused multiply/add contraction.)
 TEST(Duration, ToDoubleSecondsCheckEdgeCases) {
+#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
+  // We're using an x87-compatible FPU, and intermediate operations can be
+  // performed with 80-bit floats. This means the edge cases are different than
+  // what we expect here, so just skip this test.
+  GTEST_SKIP()
+      << "Skipping the test because we detected x87 floating-point semantics";
+#endif
+
   constexpr uint32_t kTicksPerSecond = absl::time_internal::kTicksPerSecond;
   constexpr auto duration_tick = absl::time_internal::MakeDuration(0, 1u);
   int misses = 0;
@@ -1423,8 +1438,8 @@
         }
         // Now low_edge is the highest double that converts to Duration d,
         // and high_edge is the lowest double that converts to Duration after_d.
-        VerifySameAsMul(low_edge, &misses);
-        VerifySameAsMul(high_edge, &misses);
+        VerifyApproxSameAsMul(low_edge, &misses);
+        VerifyApproxSameAsMul(high_edge, &misses);
       }
     }
   }
@@ -1444,8 +1459,8 @@
   int misses = 0;
   for (int i = 0; i < 1000000; ++i) {
     double d = std::exp(uniform(gen));
-    VerifySameAsMul(d, &misses);
-    VerifySameAsMul(-d, &misses);
+    VerifyApproxSameAsMul(d, &misses);
+    VerifyApproxSameAsMul(-d, &misses);
   }
 }
 
@@ -1805,4 +1820,18 @@
 #undef TEST_PARSE_ROUNDTRIP
 }
 
+TEST(Duration, AbslStringify) {
+  // FormatDuration is already well tested, so just use one test case here to
+  // verify that StrFormat("%v", d) works as expected.
+  absl::Duration d = absl::Seconds(1);
+  EXPECT_EQ(absl::StrFormat("%v", d), absl::FormatDuration(d));
+}
+
+TEST(Duration, NoPadding) {
+  // Should match the size of a struct with uint32_t alignment and no padding.
+  using NoPadding = std::array<uint32_t, 3>;
+  EXPECT_EQ(sizeof(NoPadding), sizeof(absl::Duration));
+  EXPECT_EQ(alignof(NoPadding), alignof(absl::Duration));
+}
+
 }  // namespace
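
The VerifyApproxSameAsMul change tolerates a one-tick difference between Seconds(x) and x * Seconds(1), because the compiler may contract the multiply and add in one code path into a fused multiply-add. A sketch of the same comparison using only public APIs (one tick is a quarter nanosecond):

#include <iostream>
#include "absl/time/time.h"

bool ApproxSame(double secs) {
  const absl::Duration direct = absl::Seconds(secs);
  const absl::Duration via_mul = secs * absl::Seconds(1);
  return absl::AbsDuration(direct - via_mul) <= absl::Nanoseconds(1) / 4;
}

int main() {
  std::cout << std::boolalpha << ApproxSame(1.0 / 3.0) << "\n";  // true
}
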
diff --git a/abseil-cpp/absl/time/flag_test.cc b/abseil-cpp/absl/time/flag_test.cc
new file mode 100644
index 0000000..8f8532b
--- /dev/null
+++ b/abseil-cpp/absl/time/flag_test.cc
@@ -0,0 +1,147 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/flags/flag.h"
+
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/flags/reflection.h"
+#include "absl/time/civil_time.h"
+#include "absl/time/time.h"
+
+ABSL_FLAG(absl::CivilSecond, test_flag_civil_second,
+          absl::CivilSecond(2015, 1, 2, 3, 4, 5), "");
+ABSL_FLAG(absl::CivilMinute, test_flag_civil_minute,
+          absl::CivilMinute(2015, 1, 2, 3, 4), "");
+ABSL_FLAG(absl::CivilHour, test_flag_civil_hour, absl::CivilHour(2015, 1, 2, 3),
+          "");
+ABSL_FLAG(absl::CivilDay, test_flag_civil_day, absl::CivilDay(2015, 1, 2), "");
+ABSL_FLAG(absl::CivilMonth, test_flag_civil_month, absl::CivilMonth(2015, 1),
+          "");
+ABSL_FLAG(absl::CivilYear, test_flag_civil_year, absl::CivilYear(2015), "");
+
+ABSL_FLAG(absl::Duration, test_duration_flag, absl::Seconds(5),
+          "For testing support for Duration flags");
+ABSL_FLAG(absl::Time, test_time_flag, absl::InfinitePast(),
+          "For testing support for Time flags");
+
+namespace {
+
+bool SetFlagValue(absl::string_view flag_name, absl::string_view value) {
+  auto* flag = absl::FindCommandLineFlag(flag_name);
+  if (!flag) return false;
+  std::string err;
+  return flag->ParseFrom(value, &err);
+}
+
+bool GetFlagValue(absl::string_view flag_name, std::string& value) {
+  auto* flag = absl::FindCommandLineFlag(flag_name);
+  if (!flag) return false;
+  value = flag->CurrentValue();
+  return true;
+}
+
+TEST(CivilTime, FlagSupport) {
+  // Tests the default setting of the flags.
+  const absl::CivilSecond kDefaultSec(2015, 1, 2, 3, 4, 5);
+  EXPECT_EQ(absl::CivilSecond(kDefaultSec),
+            absl::GetFlag(FLAGS_test_flag_civil_second));
+  EXPECT_EQ(absl::CivilMinute(kDefaultSec),
+            absl::GetFlag(FLAGS_test_flag_civil_minute));
+  EXPECT_EQ(absl::CivilHour(kDefaultSec),
+            absl::GetFlag(FLAGS_test_flag_civil_hour));
+  EXPECT_EQ(absl::CivilDay(kDefaultSec),
+            absl::GetFlag(FLAGS_test_flag_civil_day));
+  EXPECT_EQ(absl::CivilMonth(kDefaultSec),
+            absl::GetFlag(FLAGS_test_flag_civil_month));
+  EXPECT_EQ(absl::CivilYear(kDefaultSec),
+            absl::GetFlag(FLAGS_test_flag_civil_year));
+
+  // Sets flags to a new value.
+  const absl::CivilSecond kNewSec(2016, 6, 7, 8, 9, 10);
+  absl::SetFlag(&FLAGS_test_flag_civil_second, absl::CivilSecond(kNewSec));
+  absl::SetFlag(&FLAGS_test_flag_civil_minute, absl::CivilMinute(kNewSec));
+  absl::SetFlag(&FLAGS_test_flag_civil_hour, absl::CivilHour(kNewSec));
+  absl::SetFlag(&FLAGS_test_flag_civil_day, absl::CivilDay(kNewSec));
+  absl::SetFlag(&FLAGS_test_flag_civil_month, absl::CivilMonth(kNewSec));
+  absl::SetFlag(&FLAGS_test_flag_civil_year, absl::CivilYear(kNewSec));
+
+  EXPECT_EQ(absl::CivilSecond(kNewSec),
+            absl::GetFlag(FLAGS_test_flag_civil_second));
+  EXPECT_EQ(absl::CivilMinute(kNewSec),
+            absl::GetFlag(FLAGS_test_flag_civil_minute));
+  EXPECT_EQ(absl::CivilHour(kNewSec),
+            absl::GetFlag(FLAGS_test_flag_civil_hour));
+  EXPECT_EQ(absl::CivilDay(kNewSec), absl::GetFlag(FLAGS_test_flag_civil_day));
+  EXPECT_EQ(absl::CivilMonth(kNewSec),
+            absl::GetFlag(FLAGS_test_flag_civil_month));
+  EXPECT_EQ(absl::CivilYear(kNewSec),
+            absl::GetFlag(FLAGS_test_flag_civil_year));
+}
+
+TEST(Duration, FlagSupport) {
+  EXPECT_EQ(absl::Seconds(5), absl::GetFlag(FLAGS_test_duration_flag));
+
+  absl::SetFlag(&FLAGS_test_duration_flag, absl::Seconds(10));
+  EXPECT_EQ(absl::Seconds(10), absl::GetFlag(FLAGS_test_duration_flag));
+
+  EXPECT_TRUE(SetFlagValue("test_duration_flag", "20s"));
+  EXPECT_EQ(absl::Seconds(20), absl::GetFlag(FLAGS_test_duration_flag));
+
+  std::string current_flag_value;
+  EXPECT_TRUE(GetFlagValue("test_duration_flag", current_flag_value));
+  EXPECT_EQ("20s", current_flag_value);
+}
+
+TEST(Time, FlagSupport) {
+  EXPECT_EQ(absl::InfinitePast(), absl::GetFlag(FLAGS_test_time_flag));
+
+  const absl::Time t = absl::FromCivil(absl::CivilSecond(2016, 1, 2, 3, 4, 5),
+                                       absl::UTCTimeZone());
+  absl::SetFlag(&FLAGS_test_time_flag, t);
+  EXPECT_EQ(t, absl::GetFlag(FLAGS_test_time_flag));
+
+  // Successful parse
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:06Z"));
+  EXPECT_EQ(t + absl::Seconds(1), absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:07.0Z"));
+  EXPECT_EQ(t + absl::Seconds(2), absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:08.000Z"));
+  EXPECT_EQ(t + absl::Seconds(3), absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:09+00:00"));
+  EXPECT_EQ(t + absl::Seconds(4), absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:05.123+00:00"));
+  EXPECT_EQ(t + absl::Milliseconds(123), absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:05.123+08:00"));
+  EXPECT_EQ(t + absl::Milliseconds(123) - absl::Hours(8),
+            absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "infinite-future"));
+  EXPECT_EQ(absl::InfiniteFuture(), absl::GetFlag(FLAGS_test_time_flag));
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "infinite-past"));
+  EXPECT_EQ(absl::InfinitePast(), absl::GetFlag(FLAGS_test_time_flag));
+
+  EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02T03:04:06"));
+  EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02"));
+  EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02Z"));
+  EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02+00:00"));
+  EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-99-99T03:04:06Z"));
+
+  EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:05Z"));
+  std::string current_flag_value;
+  EXPECT_TRUE(GetFlagValue("test_time_flag", current_flag_value));
+  EXPECT_EQ("2016-01-02T03:04:05+00:00", current_flag_value);
+}
+
+}  // namespace
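
The new test exercises Abseil's flag support for Duration, Time, and the civil-time types. In an application the same support looks like the sketch below; the flag name is made up for illustration.

#include <iostream>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/time/time.h"

ABSL_FLAG(absl::Duration, request_timeout, absl::Seconds(5),
          "How long to wait for a response, e.g. --request_timeout=2.5s");

int main(int argc, char** argv) {
  absl::ParseCommandLine(argc, argv);  // accepts values like "500ms" or "1h30m"
  std::cout << "timeout = "
            << absl::FormatDuration(absl::GetFlag(FLAGS_request_timeout))
            << "\n";
}
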
diff --git a/abseil-cpp/absl/time/format.cc b/abseil-cpp/absl/time/format.cc
index 4005fb7..15a26b1 100644
--- a/abseil-cpp/absl/time/format.cc
+++ b/abseil-cpp/absl/time/format.cc
@@ -64,7 +64,8 @@
 // details about rep_hi and rep_lo.
 absl::Time Join(const cctz_parts& parts) {
   const int64_t rep_hi = (parts.sec - unix_epoch()).count();
-  const uint32_t rep_lo = parts.fem.count() / (1000 * 1000 / 4);
+  const uint32_t rep_lo =
+      static_cast<uint32_t>(parts.fem.count() / (1000 * 1000 / 4));
   const auto d = time_internal::MakeDuration(rep_hi, rep_lo);
   return time_internal::FromUnixDuration(d);
 }
diff --git a/abseil-cpp/absl/time/internal/cctz/BUILD.bazel b/abseil-cpp/absl/time/internal/cctz/BUILD.bazel
index 45a9529..4c5ad07 100644
--- a/abseil-cpp/absl/time/internal/cctz/BUILD.bazel
+++ b/abseil-cpp/absl/time/internal/cctz/BUILD.bazel
@@ -12,31 +12,10 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
-
 package(features = ["-parse_headers"])
 
 licenses(["notice"])
 
-filegroup(
-    name = "zoneinfo",
-    srcs = glob(["testdata/zoneinfo/**"]),
-)
-
-config_setting(
-    name = "osx",
-    constraint_values = [
-        "@bazel_tools//platforms:osx",
-    ],
-)
-
-config_setting(
-    name = "ios",
-    constraint_values = [
-        "@bazel_tools//platforms:ios",
-    ],
-)
-
 ### libraries
 
 cc_library(
@@ -75,19 +54,19 @@
         "include/cctz/zone_info_source.h",
     ],
     linkopts = select({
-        ":osx": [
-            "-framework Foundation",
-        ],
-        ":ios": [
-            "-framework Foundation",
-        ],
+        "@platforms//os:osx": ["-Wl,-framework,CoreFoundation"],
+        "@platforms//os:ios": ["-Wl,-framework,CoreFoundation"],
         "//conditions:default": [],
     }),
     visibility = ["//visibility:public"],
     deps = [
         ":civil_time",
         "//absl/base:config",
-    ],
+    ] + select(
+        {
+            "//conditions:default": [],
+        },
+    ),
 )
 
 ### tests
@@ -113,10 +92,12 @@
     size = "small",
     srcs = ["src/time_zone_format_test.cc"],
     data = [":zoneinfo"],
+    env = {"TZDIR": "absl/time/internal/cctz/testdata/zoneinfo"},
     tags = [
         "no_test_android_arm",
         "no_test_android_arm64",
         "no_test_android_x86",
+        "no_test_wasm",
     ],
     deps = [
         ":civil_time",
@@ -132,10 +113,12 @@
     timeout = "moderate",
     srcs = ["src/time_zone_lookup_test.cc"],
     data = [":zoneinfo"],
+    env = {"TZDIR": "absl/time/internal/cctz/testdata/zoneinfo"},
     tags = [
         "no_test_android_arm",
         "no_test_android_arm64",
         "no_test_android_x86",
+        "no_test_wasm",
     ],
     deps = [
         ":civil_time",
@@ -166,6 +149,12 @@
     ],
 )
 
+filegroup(
+    name = "zoneinfo",
+    srcs = glob(["testdata/zoneinfo/**"]),
+    visibility = ["//absl/time:__subpackages__"],
+)
+
 ### examples
 
 ### binaries
diff --git a/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h
index d1b4222..a5b084e 100644
--- a/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -84,14 +84,13 @@
   return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
 }
 CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
-  return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+  const int yi = static_cast<int>((y + (m > 2)) % 400);
+  return yi < 0 ? yi + 400 : yi;
 }
-CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_century(int yi) noexcept {
   return 36524 + (yi == 0 || yi > 300);
 }
-CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_4years(int yi) noexcept {
   return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
 }
 CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
@@ -133,17 +132,22 @@
     }
   }
   if (d > 365) {
+    int yi = year_index(ey, m);  // Index into Gregorian 400 year cycle.
     for (;;) {
-      int n = days_per_century(ey, m);
+      int n = days_per_century(yi);
       if (d <= n) break;
       d -= n;
       ey += 100;
+      yi += 100;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
-      int n = days_per_4years(ey, m);
+      int n = days_per_4years(yi);
       if (d <= n) break;
       d -= n;
       ey += 4;
+      yi += 4;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
       int n = days_per_year(ey, m);
@@ -416,16 +420,10 @@
 
   // Assigning arithmetic.
   CONSTEXPR_M civil_time& operator+=(diff_t n) noexcept {
-    f_ = step(T{}, f_, n);
-    return *this;
+    return *this = *this + n;
   }
   CONSTEXPR_M civil_time& operator-=(diff_t n) noexcept {
-    if (n != (std::numeric_limits<diff_t>::min)()) {
-      f_ = step(T{}, f_, -n);
-    } else {
-      f_ = step(T{}, step(T{}, f_, -(n + 1)), 1);
-    }
-    return *this;
+    return *this = *this - n;
   }
   CONSTEXPR_M civil_time& operator++() noexcept { return *this += 1; }
   CONSTEXPR_M civil_time operator++(int) noexcept {
@@ -442,13 +440,15 @@
 
   // Binary arithmetic operators.
   friend CONSTEXPR_F civil_time operator+(civil_time a, diff_t n) noexcept {
-    return a += n;
+    return civil_time(step(T{}, a.f_, n));
   }
   friend CONSTEXPR_F civil_time operator+(diff_t n, civil_time a) noexcept {
-    return a += n;
+    return a + n;
   }
   friend CONSTEXPR_F civil_time operator-(civil_time a, diff_t n) noexcept {
-    return a -= n;
+    return n != (std::numeric_limits<diff_t>::min)()
+               ? civil_time(step(T{}, a.f_, -n))
+               : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1));
   }
   friend CONSTEXPR_F diff_t operator-(civil_time lhs, civil_time rhs) noexcept {
     return difference(T{}, lhs.f_, rhs.f_);
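
The operator rewrite above routes += and -= through the binary operators and step(). Through the public Abseil wrappers, the observable behavior is plain field-based arithmetic, for example:

#include <iostream>
#include "absl/time/civil_time.h"

int main() {
  const absl::CivilDay d(2015, 2, 28);
  std::cout << absl::FormatCivilTime(d + 1) << "\n";       // 2015-03-01
  std::cout << absl::FormatCivilTime(d - 365) << "\n";     // 2014-02-28
  std::cout << (absl::CivilDay(2016, 3, 1) - d) << "\n";   // 367 (crosses a leap day)
}
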
diff --git a/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h b/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h
index 5562a37..b2b0cf6 100644
--- a/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h
+++ b/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h
@@ -22,6 +22,8 @@
 
 #include <chrono>
 #include <cstdint>
+#include <limits>
+#include <ratio>  // NOLINT: We use std::ratio in this header
 #include <string>
 #include <utility>
 
@@ -41,20 +43,9 @@
 
 namespace detail {
 template <typename D>
-inline std::pair<time_point<seconds>, D> split_seconds(
-    const time_point<D>& tp) {
-  auto sec = std::chrono::time_point_cast<seconds>(tp);
-  auto sub = tp - sec;
-  if (sub.count() < 0) {
-    sec -= seconds(1);
-    sub += seconds(1);
-  }
-  return {sec, std::chrono::duration_cast<D>(sub)};
-}
-inline std::pair<time_point<seconds>, seconds> split_seconds(
-    const time_point<seconds>& tp) {
-  return {tp, seconds::zero()};
-}
+std::pair<time_point<seconds>, D> split_seconds(const time_point<D>& tp);
+std::pair<time_point<seconds>, seconds> split_seconds(
+    const time_point<seconds>& tp);
 }  // namespace detail
 
 // cctz::time_zone is an opaque, small, value-type class representing a
@@ -279,6 +270,20 @@
                    const femtoseconds&, const time_zone&);
 bool parse(const std::string&, const std::string&, const time_zone&,
            time_point<seconds>*, femtoseconds*, std::string* err = nullptr);
+template <typename Rep, std::intmax_t Denom>
+bool join_seconds(
+    const time_point<seconds>& sec, const femtoseconds& fs,
+    time_point<std::chrono::duration<Rep, std::ratio<1, Denom>>>* tpp);
+template <typename Rep, std::intmax_t Num>
+bool join_seconds(
+    const time_point<seconds>& sec, const femtoseconds& fs,
+    time_point<std::chrono::duration<Rep, std::ratio<Num, 1>>>* tpp);
+template <typename Rep>
+bool join_seconds(
+    const time_point<seconds>& sec, const femtoseconds& fs,
+    time_point<std::chrono::duration<Rep, std::ratio<1, 1>>>* tpp);
+bool join_seconds(const time_point<seconds>& sec, const femtoseconds&,
+                  time_point<seconds>* tpp);
 }  // namespace detail
 
 // Formats the given time_point in the given cctz::time_zone according to
@@ -369,15 +374,84 @@
                   const time_zone& tz, time_point<D>* tpp) {
   time_point<seconds> sec;
   detail::femtoseconds fs;
-  const bool b = detail::parse(fmt, input, tz, &sec, &fs);
-  if (b) {
-    // TODO: Return false if unrepresentable as a time_point<D>.
-    *tpp = std::chrono::time_point_cast<D>(sec);
-    *tpp += std::chrono::duration_cast<D>(fs);
-  }
-  return b;
+  return detail::parse(fmt, input, tz, &sec, &fs) &&
+         detail::join_seconds(sec, fs, tpp);
 }
 
+namespace detail {
+
+// Split a time_point<D> into a time_point<seconds> and a D subseconds.
+// Undefined behavior if time_point<seconds> is not of sufficient range.
+// Note that this means it is UB to call cctz::time_zone::lookup(tp) or
+// cctz::format(fmt, tp, tz) with a time_point that is outside the range
+// of a 64-bit std::time_t.
+template <typename D>
+std::pair<time_point<seconds>, D> split_seconds(const time_point<D>& tp) {
+  auto sec = std::chrono::time_point_cast<seconds>(tp);
+  auto sub = tp - sec;
+  if (sub.count() < 0) {
+    sec -= seconds(1);
+    sub += seconds(1);
+  }
+  return {sec, std::chrono::duration_cast<D>(sub)};
+}
+
+inline std::pair<time_point<seconds>, seconds> split_seconds(
+    const time_point<seconds>& tp) {
+  return {tp, seconds::zero()};
+}
+
+// Join a time_point<seconds> and femto subseconds into a time_point<D>.
+// Floors to the resolution of time_point<D>. Returns false if time_point<D>
+// is not of sufficient range.
+template <typename Rep, std::intmax_t Denom>
+bool join_seconds(
+    const time_point<seconds>& sec, const femtoseconds& fs,
+    time_point<std::chrono::duration<Rep, std::ratio<1, Denom>>>* tpp) {
+  using D = std::chrono::duration<Rep, std::ratio<1, Denom>>;
+  // TODO(#199): Return false if result unrepresentable as a time_point<D>.
+  *tpp = std::chrono::time_point_cast<D>(sec);
+  *tpp += std::chrono::duration_cast<D>(fs);
+  return true;
+}
+
+template <typename Rep, std::intmax_t Num>
+bool join_seconds(
+    const time_point<seconds>& sec, const femtoseconds&,
+    time_point<std::chrono::duration<Rep, std::ratio<Num, 1>>>* tpp) {
+  using D = std::chrono::duration<Rep, std::ratio<Num, 1>>;
+  auto count = sec.time_since_epoch().count();
+  if (count >= 0 || count % Num == 0) {
+    count /= Num;
+  } else {
+    count /= Num;
+    count -= 1;
+  }
+  if (count > (std::numeric_limits<Rep>::max)()) return false;
+  if (count < (std::numeric_limits<Rep>::min)()) return false;
+  *tpp = time_point<D>() + D{static_cast<Rep>(count)};
+  return true;
+}
+
+template <typename Rep>
+bool join_seconds(
+    const time_point<seconds>& sec, const femtoseconds&,
+    time_point<std::chrono::duration<Rep, std::ratio<1, 1>>>* tpp) {
+  using D = std::chrono::duration<Rep, std::ratio<1, 1>>;
+  auto count = sec.time_since_epoch().count();
+  if (count > (std::numeric_limits<Rep>::max)()) return false;
+  if (count < (std::numeric_limits<Rep>::min)()) return false;
+  *tpp = time_point<D>() + D{static_cast<Rep>(count)};
+  return true;
+}
+
+inline bool join_seconds(const time_point<seconds>& sec, const femtoseconds&,
+                         time_point<seconds>* tpp) {
+  *tpp = sec;
+  return true;
+}
+
+}  // namespace detail
 }  // namespace cctz
 }  // namespace time_internal
 ABSL_NAMESPACE_END
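
For target resolutions coarser than one second, join_seconds() above floors toward negative infinity, so pre-epoch times round down rather than toward zero. The same flooring in isolation (Num plays the role of the ratio's numerator, e.g. 60 for std::chrono::minutes):

#include <cstdint>
#include <iostream>

std::int64_t FloorDiv(std::int64_t count, std::int64_t num) {
  std::int64_t q = count / num;            // C++ division truncates toward zero
  if (count < 0 && count % num != 0) --q;  // push negative results down one step
  return q;
}

int main() {
  std::cout << FloorDiv(61, 60) << "\n";   //  1  (61s  ->  1 min)
  std::cout << FloorDiv(-1, 60) << "\n";   // -1  (-1s  -> -1 min, not 0)
  std::cout << FloorDiv(-60, 60) << "\n";  // -1
}
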
diff --git a/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc b/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc
index 4e39188..c64f380 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc
@@ -554,6 +554,7 @@
                                       "Europe/Kaliningrad",
                                       "Europe/Kiev",
                                       "Europe/Kirov",
+                                      "Europe/Kyiv",
                                       "Europe/Lisbon",
                                       "Europe/Ljubljana",
                                       "Europe/London",
@@ -593,6 +594,7 @@
                                       "Europe/Zagreb",
                                       "Europe/Zaporozhye",
                                       "Europe/Zurich",
+                                      "Factory",
                                       "GB",
                                       "GB-Eire",
                                       "GMT",
@@ -648,6 +650,7 @@
                                       "Pacific/Guam",
                                       "Pacific/Honolulu",
                                       "Pacific/Johnston",
+                                      "Pacific/Kanton",
                                       "Pacific/Kiritimati",
                                       "Pacific/Kosrae",
                                       "Pacific/Kwajalein",
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc
index 303c024..e09654e 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc
@@ -53,7 +53,7 @@
 }  // namespace
 
 bool FixedOffsetFromName(const std::string& name, seconds* offset) {
-  if (name.compare(0, std::string::npos, "UTC", 3) == 0) {
+  if (name == "UTC" || name == "UTC0") {
     *offset = seconds::zero();
     return true;
   }
@@ -105,7 +105,7 @@
   offset_minutes %= 60;
   const std::size_t prefix_len = sizeof(kFixedZonePrefix) - 1;
   char buf[prefix_len + sizeof("-24:00:00")];
-  char* ep = std::copy(kFixedZonePrefix, kFixedZonePrefix + prefix_len, buf);
+  char* ep = std::copy_n(kFixedZonePrefix, prefix_len, buf);
   *ep++ = sign;
   ep = Format02d(ep, offset_hours);
   *ep++ = ':';
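
The old check used string::compare(pos, len, s, n), which compares all of `name` against the first three characters of "UTC" and therefore only matched the exact string "UTC"; the rewrite spells that out and additionally accepts the POSIX-style spelling "UTC0". A quick demonstration of that compare() overload's behavior:

#include <iostream>
#include <string>

int main() {
  const auto old_check = [](const std::string& name) {
    return name.compare(0, std::string::npos, "UTC", 3) == 0;
  };
  std::cout << std::boolalpha
            << old_check("UTC") << "\n"     // true
            << old_check("UTC0") << "\n"    // false: "UTC0" compares greater
            << old_check("UTC+1") << "\n";  // false
}
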
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc
index d8cb047..9b91f61 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc
@@ -13,14 +13,14 @@
 //   limitations under the License.
 
 #if !defined(HAS_STRPTIME)
-#if !defined(_MSC_VER) && !defined(__MINGW32__)
-#define HAS_STRPTIME 1  // assume everyone has strptime() except windows
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__VXWORKS__)
+#define HAS_STRPTIME 1  // Assume everyone else has strptime().
 #endif
 #endif
 
 #if defined(HAS_STRPTIME) && HAS_STRPTIME
-#if !defined(_XOPEN_SOURCE)
-#define _XOPEN_SOURCE  // Definedness suffices for strptime.
+#if !defined(_XOPEN_SOURCE) && !defined(__OpenBSD__)
+#define _XOPEN_SOURCE 500  // Exposes definitions for SUSv2 (UNIX 98).
 #endif
 #endif
 
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc
index a11f93e..4a6c71f 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc
@@ -13,15 +13,20 @@
 //   limitations under the License.
 
 #include <chrono>
+#include <cstdint>
 #include <iomanip>
 #include <sstream>
 #include <string>
 
+#include "absl/base/config.h"
+#include "absl/time/internal/cctz/include/cctz/time_zone.h"
+#if defined(__linux__)
+#include <features.h>
+#endif
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/base/config.h"
 #include "absl/time/internal/cctz/include/cctz/civil_time.h"
-#include "absl/time/internal/cctz/include/cctz/time_zone.h"
 
 namespace chrono = std::chrono;
 
@@ -59,10 +64,13 @@
 template <typename D>
 void TestFormatSpecifier(time_point<D> tp, time_zone tz, const std::string& fmt,
                          const std::string& ans) {
-  EXPECT_EQ(ans, format(fmt, tp, tz)) << fmt;
-  EXPECT_EQ("xxx " + ans, format("xxx " + fmt, tp, tz));
-  EXPECT_EQ(ans + " yyy", format(fmt + " yyy", tp, tz));
-  EXPECT_EQ("xxx " + ans + " yyy", format("xxx " + fmt + " yyy", tp, tz));
+  EXPECT_EQ(ans, absl::time_internal::cctz::format(fmt, tp, tz)) << fmt;
+  EXPECT_EQ("xxx " + ans,
+            absl::time_internal::cctz::format("xxx " + fmt, tp, tz));
+  EXPECT_EQ(ans + " yyy",
+            absl::time_internal::cctz::format(fmt + " yyy", tp, tz));
+  EXPECT_EQ("xxx " + ans + " yyy",
+            absl::time_internal::cctz::format("xxx " + fmt + " yyy", tp, tz));
 }
 
 }  // namespace
@@ -78,26 +86,29 @@
       chrono::system_clock::from_time_t(1420167845) +
       chrono::milliseconds(123) + chrono::microseconds(456) +
       chrono::nanoseconds(789);
-  EXPECT_EQ(
-      "03:04:05.123456789",
-      format(kFmt, chrono::time_point_cast<chrono::nanoseconds>(t0), utc));
-  EXPECT_EQ(
-      "03:04:05.123456",
-      format(kFmt, chrono::time_point_cast<chrono::microseconds>(t0), utc));
-  EXPECT_EQ(
-      "03:04:05.123",
-      format(kFmt, chrono::time_point_cast<chrono::milliseconds>(t0), utc));
+  EXPECT_EQ("03:04:05.123456789",
+            absl::time_internal::cctz::format(
+                kFmt, chrono::time_point_cast<chrono::nanoseconds>(t0), utc));
+  EXPECT_EQ("03:04:05.123456",
+            absl::time_internal::cctz::format(
+                kFmt, chrono::time_point_cast<chrono::microseconds>(t0), utc));
+  EXPECT_EQ("03:04:05.123",
+            absl::time_internal::cctz::format(
+                kFmt, chrono::time_point_cast<chrono::milliseconds>(t0), utc));
   EXPECT_EQ("03:04:05",
-            format(kFmt, chrono::time_point_cast<chrono::seconds>(t0), utc));
+            absl::time_internal::cctz::format(
+                kFmt, chrono::time_point_cast<chrono::seconds>(t0), utc));
   EXPECT_EQ(
       "03:04:05",
-      format(kFmt,
-             chrono::time_point_cast<absl::time_internal::cctz::seconds>(t0),
-             utc));
+      absl::time_internal::cctz::format(
+          kFmt, chrono::time_point_cast<absl::time_internal::cctz::seconds>(t0),
+          utc));
   EXPECT_EQ("03:04:00",
-            format(kFmt, chrono::time_point_cast<chrono::minutes>(t0), utc));
+            absl::time_internal::cctz::format(
+                kFmt, chrono::time_point_cast<chrono::minutes>(t0), utc));
   EXPECT_EQ("03:00:00",
-            format(kFmt, chrono::time_point_cast<chrono::hours>(t0), utc));
+            absl::time_internal::cctz::format(
+                kFmt, chrono::time_point_cast<chrono::hours>(t0), utc));
 }
 
 TEST(Format, TimePointExtendedResolution) {
@@ -132,24 +143,28 @@
   time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
 
   // Starts with a couple basic edge cases.
-  EXPECT_EQ("", format("", tp, tz));
-  EXPECT_EQ(" ", format(" ", tp, tz));
-  EXPECT_EQ("  ", format("  ", tp, tz));
-  EXPECT_EQ("xxx", format("xxx", tp, tz));
+  EXPECT_EQ("", absl::time_internal::cctz::format("", tp, tz));
+  EXPECT_EQ(" ", absl::time_internal::cctz::format(" ", tp, tz));
+  EXPECT_EQ("  ", absl::time_internal::cctz::format("  ", tp, tz));
+  EXPECT_EQ("xxx", absl::time_internal::cctz::format("xxx", tp, tz));
   std::string big(128, 'x');
-  EXPECT_EQ(big, format(big, tp, tz));
+  EXPECT_EQ(big, absl::time_internal::cctz::format(big, tp, tz));
   // Cause the 1024-byte buffer to grow.
   std::string bigger(100000, 'x');
-  EXPECT_EQ(bigger, format(bigger, tp, tz));
+  EXPECT_EQ(bigger, absl::time_internal::cctz::format(bigger, tp, tz));
 
   tp += chrono::hours(13) + chrono::minutes(4) + chrono::seconds(5);
   tp += chrono::milliseconds(6) + chrono::microseconds(7) +
         chrono::nanoseconds(8);
-  EXPECT_EQ("1970-01-01", format("%Y-%m-%d", tp, tz));
-  EXPECT_EQ("13:04:05", format("%H:%M:%S", tp, tz));
-  EXPECT_EQ("13:04:05.006", format("%H:%M:%E3S", tp, tz));
-  EXPECT_EQ("13:04:05.006007", format("%H:%M:%E6S", tp, tz));
-  EXPECT_EQ("13:04:05.006007008", format("%H:%M:%E9S", tp, tz));
+  EXPECT_EQ("1970-01-01",
+            absl::time_internal::cctz::format("%Y-%m-%d", tp, tz));
+  EXPECT_EQ("13:04:05", absl::time_internal::cctz::format("%H:%M:%S", tp, tz));
+  EXPECT_EQ("13:04:05.006",
+            absl::time_internal::cctz::format("%H:%M:%E3S", tp, tz));
+  EXPECT_EQ("13:04:05.006007",
+            absl::time_internal::cctz::format("%H:%M:%E6S", tp, tz));
+  EXPECT_EQ("13:04:05.006007008",
+            absl::time_internal::cctz::format("%H:%M:%E9S", tp, tz));
 }
 
 TEST(Format, PosixConversions) {
@@ -182,8 +197,10 @@
   TestFormatSpecifier(tp, tz, "%F", "1970-01-01");
   TestFormatSpecifier(tp, tz, "%g", "70");
   TestFormatSpecifier(tp, tz, "%G", "1970");
+#if defined(__GLIBC__)
   TestFormatSpecifier(tp, tz, "%k", " 0");
   TestFormatSpecifier(tp, tz, "%l", "12");
+#endif
   TestFormatSpecifier(tp, tz, "%n", "\n");
   TestFormatSpecifier(tp, tz, "%R", "00:00");
   TestFormatSpecifier(tp, tz, "%t", "\t");
@@ -204,7 +221,8 @@
   TestFormatSpecifier(tp, tz, "%B", "January");
 
   // %c should at least produce the numeric year and time-of-day.
-  const std::string s = format("%c", tp, utc_time_zone());
+  const std::string s =
+      absl::time_internal::cctz::format("%c", tp, utc_time_zone());
   EXPECT_THAT(s, testing::HasSubstr("1970"));
   EXPECT_THAT(s, testing::HasSubstr("00:00:00"));
 
@@ -215,7 +233,9 @@
 #if defined(__linux__)
   // SU/C99/TZ extensions
   TestFormatSpecifier(tp, tz, "%h", "Jan");  // Same as %b
+#if defined(__GLIBC__)
   TestFormatSpecifier(tp, tz, "%P", "am");
+#endif
   TestFormatSpecifier(tp, tz, "%r", "12:00:00 AM");
 
   // Modified conversion specifiers %E_
@@ -268,49 +288,61 @@
   // No subseconds.
   time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
   tp += chrono::seconds(5);
-  EXPECT_EQ("05", format("%E*S", tp, tz));
-  EXPECT_EQ("05", format("%E0S", tp, tz));
-  EXPECT_EQ("05.0", format("%E1S", tp, tz));
-  EXPECT_EQ("05.00", format("%E2S", tp, tz));
-  EXPECT_EQ("05.000", format("%E3S", tp, tz));
-  EXPECT_EQ("05.0000", format("%E4S", tp, tz));
-  EXPECT_EQ("05.00000", format("%E5S", tp, tz));
-  EXPECT_EQ("05.000000", format("%E6S", tp, tz));
-  EXPECT_EQ("05.0000000", format("%E7S", tp, tz));
-  EXPECT_EQ("05.00000000", format("%E8S", tp, tz));
-  EXPECT_EQ("05.000000000", format("%E9S", tp, tz));
-  EXPECT_EQ("05.0000000000", format("%E10S", tp, tz));
-  EXPECT_EQ("05.00000000000", format("%E11S", tp, tz));
-  EXPECT_EQ("05.000000000000", format("%E12S", tp, tz));
-  EXPECT_EQ("05.0000000000000", format("%E13S", tp, tz));
-  EXPECT_EQ("05.00000000000000", format("%E14S", tp, tz));
-  EXPECT_EQ("05.000000000000000", format("%E15S", tp, tz));
+  EXPECT_EQ("05", absl::time_internal::cctz::format("%E*S", tp, tz));
+  EXPECT_EQ("05", absl::time_internal::cctz::format("%E0S", tp, tz));
+  EXPECT_EQ("05.0", absl::time_internal::cctz::format("%E1S", tp, tz));
+  EXPECT_EQ("05.00", absl::time_internal::cctz::format("%E2S", tp, tz));
+  EXPECT_EQ("05.000", absl::time_internal::cctz::format("%E3S", tp, tz));
+  EXPECT_EQ("05.0000", absl::time_internal::cctz::format("%E4S", tp, tz));
+  EXPECT_EQ("05.00000", absl::time_internal::cctz::format("%E5S", tp, tz));
+  EXPECT_EQ("05.000000", absl::time_internal::cctz::format("%E6S", tp, tz));
+  EXPECT_EQ("05.0000000", absl::time_internal::cctz::format("%E7S", tp, tz));
+  EXPECT_EQ("05.00000000", absl::time_internal::cctz::format("%E8S", tp, tz));
+  EXPECT_EQ("05.000000000", absl::time_internal::cctz::format("%E9S", tp, tz));
+  EXPECT_EQ("05.0000000000",
+            absl::time_internal::cctz::format("%E10S", tp, tz));
+  EXPECT_EQ("05.00000000000",
+            absl::time_internal::cctz::format("%E11S", tp, tz));
+  EXPECT_EQ("05.000000000000",
+            absl::time_internal::cctz::format("%E12S", tp, tz));
+  EXPECT_EQ("05.0000000000000",
+            absl::time_internal::cctz::format("%E13S", tp, tz));
+  EXPECT_EQ("05.00000000000000",
+            absl::time_internal::cctz::format("%E14S", tp, tz));
+  EXPECT_EQ("05.000000000000000",
+            absl::time_internal::cctz::format("%E15S", tp, tz));
 
   // With subseconds.
   tp += chrono::milliseconds(6) + chrono::microseconds(7) +
         chrono::nanoseconds(8);
-  EXPECT_EQ("05.006007008", format("%E*S", tp, tz));
-  EXPECT_EQ("05", format("%E0S", tp, tz));
-  EXPECT_EQ("05.0", format("%E1S", tp, tz));
-  EXPECT_EQ("05.00", format("%E2S", tp, tz));
-  EXPECT_EQ("05.006", format("%E3S", tp, tz));
-  EXPECT_EQ("05.0060", format("%E4S", tp, tz));
-  EXPECT_EQ("05.00600", format("%E5S", tp, tz));
-  EXPECT_EQ("05.006007", format("%E6S", tp, tz));
-  EXPECT_EQ("05.0060070", format("%E7S", tp, tz));
-  EXPECT_EQ("05.00600700", format("%E8S", tp, tz));
-  EXPECT_EQ("05.006007008", format("%E9S", tp, tz));
-  EXPECT_EQ("05.0060070080", format("%E10S", tp, tz));
-  EXPECT_EQ("05.00600700800", format("%E11S", tp, tz));
-  EXPECT_EQ("05.006007008000", format("%E12S", tp, tz));
-  EXPECT_EQ("05.0060070080000", format("%E13S", tp, tz));
-  EXPECT_EQ("05.00600700800000", format("%E14S", tp, tz));
-  EXPECT_EQ("05.006007008000000", format("%E15S", tp, tz));
+  EXPECT_EQ("05.006007008", absl::time_internal::cctz::format("%E*S", tp, tz));
+  EXPECT_EQ("05", absl::time_internal::cctz::format("%E0S", tp, tz));
+  EXPECT_EQ("05.0", absl::time_internal::cctz::format("%E1S", tp, tz));
+  EXPECT_EQ("05.00", absl::time_internal::cctz::format("%E2S", tp, tz));
+  EXPECT_EQ("05.006", absl::time_internal::cctz::format("%E3S", tp, tz));
+  EXPECT_EQ("05.0060", absl::time_internal::cctz::format("%E4S", tp, tz));
+  EXPECT_EQ("05.00600", absl::time_internal::cctz::format("%E5S", tp, tz));
+  EXPECT_EQ("05.006007", absl::time_internal::cctz::format("%E6S", tp, tz));
+  EXPECT_EQ("05.0060070", absl::time_internal::cctz::format("%E7S", tp, tz));
+  EXPECT_EQ("05.00600700", absl::time_internal::cctz::format("%E8S", tp, tz));
+  EXPECT_EQ("05.006007008", absl::time_internal::cctz::format("%E9S", tp, tz));
+  EXPECT_EQ("05.0060070080",
+            absl::time_internal::cctz::format("%E10S", tp, tz));
+  EXPECT_EQ("05.00600700800",
+            absl::time_internal::cctz::format("%E11S", tp, tz));
+  EXPECT_EQ("05.006007008000",
+            absl::time_internal::cctz::format("%E12S", tp, tz));
+  EXPECT_EQ("05.0060070080000",
+            absl::time_internal::cctz::format("%E13S", tp, tz));
+  EXPECT_EQ("05.00600700800000",
+            absl::time_internal::cctz::format("%E14S", tp, tz));
+  EXPECT_EQ("05.006007008000000",
+            absl::time_internal::cctz::format("%E15S", tp, tz));
 
   // Times before the Unix epoch.
   tp = chrono::system_clock::from_time_t(0) + chrono::microseconds(-1);
   EXPECT_EQ("1969-12-31 23:59:59.999999",
-            format("%Y-%m-%d %H:%M:%E*S", tp, tz));
+            absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%E*S", tp, tz));
 
   // Here is a "%E*S" case we got wrong for a while.  While the first
   // instant below is correctly rendered as "...:07.333304", the second
@@ -318,10 +350,10 @@
   tp = chrono::system_clock::from_time_t(0) +
        chrono::microseconds(1395024427333304);
   EXPECT_EQ("2014-03-17 02:47:07.333304",
-            format("%Y-%m-%d %H:%M:%E*S", tp, tz));
+            absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%E*S", tp, tz));
   tp += chrono::microseconds(1);
   EXPECT_EQ("2014-03-17 02:47:07.333305",
-            format("%Y-%m-%d %H:%M:%E*S", tp, tz));
+            absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%E*S", tp, tz));
 }
 
 TEST(Format, ExtendedSubeconds) {
@@ -330,60 +362,69 @@
   // No subseconds.
   time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
   tp += chrono::seconds(5);
-  EXPECT_EQ("0", format("%E*f", tp, tz));
-  EXPECT_EQ("", format("%E0f", tp, tz));
-  EXPECT_EQ("0", format("%E1f", tp, tz));
-  EXPECT_EQ("00", format("%E2f", tp, tz));
-  EXPECT_EQ("000", format("%E3f", tp, tz));
-  EXPECT_EQ("0000", format("%E4f", tp, tz));
-  EXPECT_EQ("00000", format("%E5f", tp, tz));
-  EXPECT_EQ("000000", format("%E6f", tp, tz));
-  EXPECT_EQ("0000000", format("%E7f", tp, tz));
-  EXPECT_EQ("00000000", format("%E8f", tp, tz));
-  EXPECT_EQ("000000000", format("%E9f", tp, tz));
-  EXPECT_EQ("0000000000", format("%E10f", tp, tz));
-  EXPECT_EQ("00000000000", format("%E11f", tp, tz));
-  EXPECT_EQ("000000000000", format("%E12f", tp, tz));
-  EXPECT_EQ("0000000000000", format("%E13f", tp, tz));
-  EXPECT_EQ("00000000000000", format("%E14f", tp, tz));
-  EXPECT_EQ("000000000000000", format("%E15f", tp, tz));
+  EXPECT_EQ("0", absl::time_internal::cctz::format("%E*f", tp, tz));
+  EXPECT_EQ("", absl::time_internal::cctz::format("%E0f", tp, tz));
+  EXPECT_EQ("0", absl::time_internal::cctz::format("%E1f", tp, tz));
+  EXPECT_EQ("00", absl::time_internal::cctz::format("%E2f", tp, tz));
+  EXPECT_EQ("000", absl::time_internal::cctz::format("%E3f", tp, tz));
+  EXPECT_EQ("0000", absl::time_internal::cctz::format("%E4f", tp, tz));
+  EXPECT_EQ("00000", absl::time_internal::cctz::format("%E5f", tp, tz));
+  EXPECT_EQ("000000", absl::time_internal::cctz::format("%E6f", tp, tz));
+  EXPECT_EQ("0000000", absl::time_internal::cctz::format("%E7f", tp, tz));
+  EXPECT_EQ("00000000", absl::time_internal::cctz::format("%E8f", tp, tz));
+  EXPECT_EQ("000000000", absl::time_internal::cctz::format("%E9f", tp, tz));
+  EXPECT_EQ("0000000000", absl::time_internal::cctz::format("%E10f", tp, tz));
+  EXPECT_EQ("00000000000", absl::time_internal::cctz::format("%E11f", tp, tz));
+  EXPECT_EQ("000000000000", absl::time_internal::cctz::format("%E12f", tp, tz));
+  EXPECT_EQ("0000000000000",
+            absl::time_internal::cctz::format("%E13f", tp, tz));
+  EXPECT_EQ("00000000000000",
+            absl::time_internal::cctz::format("%E14f", tp, tz));
+  EXPECT_EQ("000000000000000",
+            absl::time_internal::cctz::format("%E15f", tp, tz));
 
   // With subseconds.
   tp += chrono::milliseconds(6) + chrono::microseconds(7) +
         chrono::nanoseconds(8);
-  EXPECT_EQ("006007008", format("%E*f", tp, tz));
-  EXPECT_EQ("", format("%E0f", tp, tz));
-  EXPECT_EQ("0", format("%E1f", tp, tz));
-  EXPECT_EQ("00", format("%E2f", tp, tz));
-  EXPECT_EQ("006", format("%E3f", tp, tz));
-  EXPECT_EQ("0060", format("%E4f", tp, tz));
-  EXPECT_EQ("00600", format("%E5f", tp, tz));
-  EXPECT_EQ("006007", format("%E6f", tp, tz));
-  EXPECT_EQ("0060070", format("%E7f", tp, tz));
-  EXPECT_EQ("00600700", format("%E8f", tp, tz));
-  EXPECT_EQ("006007008", format("%E9f", tp, tz));
-  EXPECT_EQ("0060070080", format("%E10f", tp, tz));
-  EXPECT_EQ("00600700800", format("%E11f", tp, tz));
-  EXPECT_EQ("006007008000", format("%E12f", tp, tz));
-  EXPECT_EQ("0060070080000", format("%E13f", tp, tz));
-  EXPECT_EQ("00600700800000", format("%E14f", tp, tz));
-  EXPECT_EQ("006007008000000", format("%E15f", tp, tz));
+  EXPECT_EQ("006007008", absl::time_internal::cctz::format("%E*f", tp, tz));
+  EXPECT_EQ("", absl::time_internal::cctz::format("%E0f", tp, tz));
+  EXPECT_EQ("0", absl::time_internal::cctz::format("%E1f", tp, tz));
+  EXPECT_EQ("00", absl::time_internal::cctz::format("%E2f", tp, tz));
+  EXPECT_EQ("006", absl::time_internal::cctz::format("%E3f", tp, tz));
+  EXPECT_EQ("0060", absl::time_internal::cctz::format("%E4f", tp, tz));
+  EXPECT_EQ("00600", absl::time_internal::cctz::format("%E5f", tp, tz));
+  EXPECT_EQ("006007", absl::time_internal::cctz::format("%E6f", tp, tz));
+  EXPECT_EQ("0060070", absl::time_internal::cctz::format("%E7f", tp, tz));
+  EXPECT_EQ("00600700", absl::time_internal::cctz::format("%E8f", tp, tz));
+  EXPECT_EQ("006007008", absl::time_internal::cctz::format("%E9f", tp, tz));
+  EXPECT_EQ("0060070080", absl::time_internal::cctz::format("%E10f", tp, tz));
+  EXPECT_EQ("00600700800", absl::time_internal::cctz::format("%E11f", tp, tz));
+  EXPECT_EQ("006007008000", absl::time_internal::cctz::format("%E12f", tp, tz));
+  EXPECT_EQ("0060070080000",
+            absl::time_internal::cctz::format("%E13f", tp, tz));
+  EXPECT_EQ("00600700800000",
+            absl::time_internal::cctz::format("%E14f", tp, tz));
+  EXPECT_EQ("006007008000000",
+            absl::time_internal::cctz::format("%E15f", tp, tz));
 
   // Times before the Unix epoch.
   tp = chrono::system_clock::from_time_t(0) + chrono::microseconds(-1);
-  EXPECT_EQ("1969-12-31 23:59:59.999999",
-            format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
+  EXPECT_EQ(
+      "1969-12-31 23:59:59.999999",
+      absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
 
   // Here is a "%E*S" case we got wrong for a while.  While the first
   // instant below is correctly rendered as "...:07.333304", the second
   // one used to appear as "...:07.33330499999999999".
   tp = chrono::system_clock::from_time_t(0) +
        chrono::microseconds(1395024427333304);
-  EXPECT_EQ("2014-03-17 02:47:07.333304",
-            format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
+  EXPECT_EQ(
+      "2014-03-17 02:47:07.333304",
+      absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
   tp += chrono::microseconds(1);
-  EXPECT_EQ("2014-03-17 02:47:07.333305",
-            format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
+  EXPECT_EQ(
+      "2014-03-17 02:47:07.333305",
+      absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
 }
 
 TEST(Format, CompareExtendSecondsVsSubseconds) {
@@ -399,15 +440,17 @@
   time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
   tp += chrono::seconds(5);
   // ... %E*S and %S.%E*f are different.
-  EXPECT_EQ("05", format(fmt_A("*"), tp, tz));
-  EXPECT_EQ("05.0", format(fmt_B("*"), tp, tz));
+  EXPECT_EQ("05", absl::time_internal::cctz::format(fmt_A("*"), tp, tz));
+  EXPECT_EQ("05.0", absl::time_internal::cctz::format(fmt_B("*"), tp, tz));
   // ... %E0S and %S.%E0f are different.
-  EXPECT_EQ("05", format(fmt_A("0"), tp, tz));
-  EXPECT_EQ("05.", format(fmt_B("0"), tp, tz));
+  EXPECT_EQ("05", absl::time_internal::cctz::format(fmt_A("0"), tp, tz));
+  EXPECT_EQ("05.", absl::time_internal::cctz::format(fmt_B("0"), tp, tz));
   // ... %E<prec>S and %S.%E<prec>f are the same for prec in [1:15].
   for (int prec = 1; prec <= 15; ++prec) {
-    const std::string a = format(fmt_A(std::to_string(prec)), tp, tz);
-    const std::string b = format(fmt_B(std::to_string(prec)), tp, tz);
+    const std::string a =
+        absl::time_internal::cctz::format(fmt_A(std::to_string(prec)), tp, tz);
+    const std::string b =
+        absl::time_internal::cctz::format(fmt_B(std::to_string(prec)), tp, tz);
     EXPECT_EQ(a, b) << "prec=" << prec;
   }
 
@@ -415,15 +458,19 @@
   // ... %E*S and %S.%E*f are the same.
   tp += chrono::milliseconds(6) + chrono::microseconds(7) +
         chrono::nanoseconds(8);
-  EXPECT_EQ("05.006007008", format(fmt_A("*"), tp, tz));
-  EXPECT_EQ("05.006007008", format(fmt_B("*"), tp, tz));
+  EXPECT_EQ("05.006007008",
+            absl::time_internal::cctz::format(fmt_A("*"), tp, tz));
+  EXPECT_EQ("05.006007008",
+            absl::time_internal::cctz::format(fmt_B("*"), tp, tz));
   // ... %E0S and %S.%E0f are different.
-  EXPECT_EQ("05", format(fmt_A("0"), tp, tz));
-  EXPECT_EQ("05.", format(fmt_B("0"), tp, tz));
+  EXPECT_EQ("05", absl::time_internal::cctz::format(fmt_A("0"), tp, tz));
+  EXPECT_EQ("05.", absl::time_internal::cctz::format(fmt_B("0"), tp, tz));
   // ... %E<prec>S and %S.%E<prec>f are the same for prec in [1:15].
   for (int prec = 1; prec <= 15; ++prec) {
-    const std::string a = format(fmt_A(std::to_string(prec)), tp, tz);
-    const std::string b = format(fmt_B(std::to_string(prec)), tp, tz);
+    const std::string a =
+        absl::time_internal::cctz::format(fmt_A(std::to_string(prec)), tp, tz);
+    const std::string b =
+        absl::time_internal::cctz::format(fmt_B(std::to_string(prec)), tp, tz);
     EXPECT_EQ(a, b) << "prec=" << prec;
   }
 }
@@ -596,31 +643,31 @@
 
   // %E4Y zero-pads the year to produce at least 4 chars, including the sign.
   auto tp = convert(civil_second(-999, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("-9991127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("-9991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(-99, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("-0991127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("-0991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(-9, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("-0091127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("-0091127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(-1, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("-0011127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("-0011127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(0, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("00001127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("00001127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(1, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("00011127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("00011127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(9, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("00091127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("00091127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(99, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("00991127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("00991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(999, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("09991127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("09991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(9999, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("99991127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("99991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
 
   // When the year is outside [-999:9999], more than 4 chars are produced.
   tp = convert(civil_second(-1000, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("-10001127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("-10001127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
   tp = convert(civil_second(10000, 11, 27, 0, 0, 0), utc);
-  EXPECT_EQ("100001127", format(e4y_fmt, tp, utc));
+  EXPECT_EQ("100001127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
 }
 
 TEST(Format, RFC3339Format) {
@@ -629,45 +676,64 @@
 
   time_point<chrono::nanoseconds> tp =
       convert(civil_second(1977, 6, 28, 9, 8, 7), tz);
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::milliseconds(100);
-  EXPECT_EQ("1977-06-28T09:08:07.1-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.1-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::milliseconds(20);
-  EXPECT_EQ("1977-06-28T09:08:07.12-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.12-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::milliseconds(3);
-  EXPECT_EQ("1977-06-28T09:08:07.123-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.123-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::microseconds(400);
-  EXPECT_EQ("1977-06-28T09:08:07.1234-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.1234-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::microseconds(50);
-  EXPECT_EQ("1977-06-28T09:08:07.12345-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.12345-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::microseconds(6);
-  EXPECT_EQ("1977-06-28T09:08:07.123456-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.123456-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::nanoseconds(700);
-  EXPECT_EQ("1977-06-28T09:08:07.1234567-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.1234567-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::nanoseconds(80);
-  EXPECT_EQ("1977-06-28T09:08:07.12345678-07:00", format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07.12345678-07:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 
   tp += chrono::nanoseconds(9);
   EXPECT_EQ("1977-06-28T09:08:07.123456789-07:00",
-            format(RFC3339_full, tp, tz));
-  EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+            absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+  EXPECT_EQ("1977-06-28T09:08:07-07:00",
+            absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
 }
 
 TEST(Format, RFC1123Format) {  // locale specific
@@ -675,36 +741,50 @@
   EXPECT_TRUE(load_time_zone("America/Los_Angeles", &tz));
 
   auto tp = convert(civil_second(1977, 6, 28, 9, 8, 7), tz);
-  EXPECT_EQ("Tue, 28 Jun 1977 09:08:07 -0700", format(RFC1123_full, tp, tz));
-  EXPECT_EQ("28 Jun 1977 09:08:07 -0700", format(RFC1123_no_wday, tp, tz));
+  EXPECT_EQ("Tue, 28 Jun 1977 09:08:07 -0700",
+            absl::time_internal::cctz::format(RFC1123_full, tp, tz));
+  EXPECT_EQ("28 Jun 1977 09:08:07 -0700",
+            absl::time_internal::cctz::format(RFC1123_no_wday, tp, tz));
 }
 
 TEST(Format, Week) {
   const time_zone utc = utc_time_zone();
 
   auto tp = convert(civil_second(2017, 1, 1, 0, 0, 0), utc);
-  EXPECT_EQ("2017-01-7", format("%Y-%U-%u", tp, utc));
-  EXPECT_EQ("2017-00-0", format("%Y-%W-%w", tp, utc));
+  EXPECT_EQ("2017-01-7",
+            absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+  EXPECT_EQ("2017-00-0",
+            absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
 
   tp = convert(civil_second(2017, 12, 31, 0, 0, 0), utc);
-  EXPECT_EQ("2017-53-7", format("%Y-%U-%u", tp, utc));
-  EXPECT_EQ("2017-52-0", format("%Y-%W-%w", tp, utc));
+  EXPECT_EQ("2017-53-7",
+            absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+  EXPECT_EQ("2017-52-0",
+            absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
 
   tp = convert(civil_second(2018, 1, 1, 0, 0, 0), utc);
-  EXPECT_EQ("2018-00-1", format("%Y-%U-%u", tp, utc));
-  EXPECT_EQ("2018-01-1", format("%Y-%W-%w", tp, utc));
+  EXPECT_EQ("2018-00-1",
+            absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+  EXPECT_EQ("2018-01-1",
+            absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
 
   tp = convert(civil_second(2018, 12, 31, 0, 0, 0), utc);
-  EXPECT_EQ("2018-52-1", format("%Y-%U-%u", tp, utc));
-  EXPECT_EQ("2018-53-1", format("%Y-%W-%w", tp, utc));
+  EXPECT_EQ("2018-52-1",
+            absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+  EXPECT_EQ("2018-53-1",
+            absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
 
   tp = convert(civil_second(2019, 1, 1, 0, 0, 0), utc);
-  EXPECT_EQ("2019-00-2", format("%Y-%U-%u", tp, utc));
-  EXPECT_EQ("2019-00-2", format("%Y-%W-%w", tp, utc));
+  EXPECT_EQ("2019-00-2",
+            absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+  EXPECT_EQ("2019-00-2",
+            absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
 
   tp = convert(civil_second(2019, 12, 31, 0, 0, 0), utc);
-  EXPECT_EQ("2019-52-2", format("%Y-%U-%u", tp, utc));
-  EXPECT_EQ("2019-52-2", format("%Y-%W-%w", tp, utc));
+  EXPECT_EQ("2019-52-2",
+            absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+  EXPECT_EQ("2019-52-2",
+            absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
 }
 
 //
@@ -717,39 +797,46 @@
 
   time_point<chrono::nanoseconds> tp_ns;
   EXPECT_TRUE(parse(kFmt, "03:04:05.123456789", utc, &tp_ns));
-  EXPECT_EQ("03:04:05.123456789", format(kFmt, tp_ns, utc));
+  EXPECT_EQ("03:04:05.123456789",
+            absl::time_internal::cctz::format(kFmt, tp_ns, utc));
   EXPECT_TRUE(parse(kFmt, "03:04:05.123456", utc, &tp_ns));
-  EXPECT_EQ("03:04:05.123456", format(kFmt, tp_ns, utc));
+  EXPECT_EQ("03:04:05.123456",
+            absl::time_internal::cctz::format(kFmt, tp_ns, utc));
 
   time_point<chrono::microseconds> tp_us;
   EXPECT_TRUE(parse(kFmt, "03:04:05.123456789", utc, &tp_us));
-  EXPECT_EQ("03:04:05.123456", format(kFmt, tp_us, utc));
+  EXPECT_EQ("03:04:05.123456",
+            absl::time_internal::cctz::format(kFmt, tp_us, utc));
   EXPECT_TRUE(parse(kFmt, "03:04:05.123456", utc, &tp_us));
-  EXPECT_EQ("03:04:05.123456", format(kFmt, tp_us, utc));
+  EXPECT_EQ("03:04:05.123456",
+            absl::time_internal::cctz::format(kFmt, tp_us, utc));
   EXPECT_TRUE(parse(kFmt, "03:04:05.123", utc, &tp_us));
-  EXPECT_EQ("03:04:05.123", format(kFmt, tp_us, utc));
+  EXPECT_EQ("03:04:05.123",
+            absl::time_internal::cctz::format(kFmt, tp_us, utc));
 
   time_point<chrono::milliseconds> tp_ms;
   EXPECT_TRUE(parse(kFmt, "03:04:05.123456", utc, &tp_ms));
-  EXPECT_EQ("03:04:05.123", format(kFmt, tp_ms, utc));
+  EXPECT_EQ("03:04:05.123",
+            absl::time_internal::cctz::format(kFmt, tp_ms, utc));
   EXPECT_TRUE(parse(kFmt, "03:04:05.123", utc, &tp_ms));
-  EXPECT_EQ("03:04:05.123", format(kFmt, tp_ms, utc));
+  EXPECT_EQ("03:04:05.123",
+            absl::time_internal::cctz::format(kFmt, tp_ms, utc));
   EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_ms));
-  EXPECT_EQ("03:04:05", format(kFmt, tp_ms, utc));
+  EXPECT_EQ("03:04:05", absl::time_internal::cctz::format(kFmt, tp_ms, utc));
 
   time_point<chrono::seconds> tp_s;
   EXPECT_TRUE(parse(kFmt, "03:04:05.123", utc, &tp_s));
-  EXPECT_EQ("03:04:05", format(kFmt, tp_s, utc));
+  EXPECT_EQ("03:04:05", absl::time_internal::cctz::format(kFmt, tp_s, utc));
   EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_s));
-  EXPECT_EQ("03:04:05", format(kFmt, tp_s, utc));
+  EXPECT_EQ("03:04:05", absl::time_internal::cctz::format(kFmt, tp_s, utc));
 
   time_point<chrono::minutes> tp_m;
   EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_m));
-  EXPECT_EQ("03:04:00", format(kFmt, tp_m, utc));
+  EXPECT_EQ("03:04:00", absl::time_internal::cctz::format(kFmt, tp_m, utc));
 
   time_point<chrono::hours> tp_h;
   EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_h));
-  EXPECT_EQ("03:00:00", format(kFmt, tp_h, utc));
+  EXPECT_EQ("03:00:00", absl::time_internal::cctz::format(kFmt, tp_h, utc));
 }
 
 TEST(Parse, TimePointExtendedResolution) {
@@ -1044,9 +1131,11 @@
   EXPECT_TRUE(parse("%h", "Feb", tz, &tp));
   EXPECT_EQ(2, convert(tp, tz).month());  // Equivalent to %b
 
+#if defined(__GLIBC__)
   tp = reset;
   EXPECT_TRUE(parse("%l %p", "5 PM", tz, &tp));
   EXPECT_EQ(17, convert(tp, tz).hour());
+#endif
 
   tp = reset;
   EXPECT_TRUE(parse("%r", "03:44:55 PM", tz, &tp));
@@ -1054,6 +1143,7 @@
   EXPECT_EQ(44, convert(tp, tz).minute());
   EXPECT_EQ(55, convert(tp, tz).second());
 
+#if defined(__GLIBC__)
   tp = reset;
   EXPECT_TRUE(parse("%Ec", "Tue Nov 19 05:06:07 2013", tz, &tp));
   EXPECT_EQ(convert(civil_second(2013, 11, 19, 5, 6, 7), tz), tp);
@@ -1125,6 +1215,7 @@
   EXPECT_TRUE(parse("%Oy", "04", tz, &tp));
   EXPECT_EQ(2004, convert(tp, tz).year());
 #endif
+#endif
 }
 
 TEST(Parse, ExtendedSeconds) {
@@ -1135,7 +1226,7 @@
   // All %E<prec>S cases are treated the same as %E*S on input.
   auto precisions = {"*", "0", "1",  "2",  "3",  "4",  "5",  "6", "7",
                      "8", "9", "10", "11", "12", "13", "14", "15"};
-  for (const std::string& prec : precisions) {
+  for (const std::string prec : precisions) {
     const std::string fmt = "%E" + prec + "S";
     SCOPED_TRACE(fmt);
     time_point<chrono::nanoseconds> tp = unix_epoch;
@@ -1217,7 +1308,7 @@
   // All %E<prec>f cases are treated the same as %E*f on input.
   auto precisions = {"*", "0", "1",  "2",  "3",  "4",  "5",  "6", "7",
                      "8", "9", "10", "11", "12", "13", "14", "15"};
-  for (const std::string& prec : precisions) {
+  for (const std::string prec : precisions) {
     const std::string fmt = "%E" + prec + "f";
     SCOPED_TRACE(fmt);
     time_point<chrono::nanoseconds> tp = unix_epoch - chrono::seconds(1);
@@ -1504,7 +1595,7 @@
       parse(RFC3339_sec, "292277026596-12-04T14:30:07-01:00", utc, &tp));
   EXPECT_EQ(tp, time_point<absl::time_internal::cctz::seconds>::max());
   EXPECT_FALSE(
-      parse(RFC3339_sec, "292277026596-12-04T15:30:07-01:00", utc, &tp));
+      parse(RFC3339_sec, "292277026596-12-04T14:30:08-01:00", utc, &tp));
 
   // tests the lower limit using +00:00 offset
   EXPECT_TRUE(
@@ -1525,10 +1616,86 @@
       parse(RFC3339_sec, "9223372036854775807-12-31T23:59:59-00:01", utc, &tp));
   EXPECT_FALSE(parse(RFC3339_sec, "-9223372036854775808-01-01T00:00:00+00:01",
                      utc, &tp));
+}
 
-  // TODO: Add tests that parsing times with fractional seconds overflow
-  // appropriately. This can't be done until cctz::parse() properly detects
-  // overflow when combining the chrono seconds and femto.
+TEST(Parse, TimePointOverflow) {
+  const time_zone utc = utc_time_zone();
+
+  using D = chrono::duration<std::int64_t, std::nano>;
+  time_point<D> tp;
+
+  EXPECT_TRUE(
+      parse(RFC3339_full, "2262-04-11T23:47:16.8547758079+00:00", utc, &tp));
+  EXPECT_EQ(tp, time_point<D>::max());
+  EXPECT_EQ("2262-04-11T23:47:16.854775807+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, utc));
+#if 0
+  // TODO(#199): Will fail until cctz::parse() properly detects overflow.
+  EXPECT_FALSE(
+      parse(RFC3339_full, "2262-04-11T23:47:16.8547758080+00:00", utc, &tp));
+  EXPECT_TRUE(
+      parse(RFC3339_full, "1677-09-21T00:12:43.1452241920+00:00", utc, &tp));
+  EXPECT_EQ(tp, time_point<D>::min());
+  EXPECT_EQ("1677-09-21T00:12:43.145224192+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, utc));
+  EXPECT_FALSE(
+      parse(RFC3339_full, "1677-09-21T00:12:43.1452241919+00:00", utc, &tp));
+#endif
+
+  using DS = chrono::duration<std::int8_t, chrono::seconds::period>;
+  time_point<DS> stp;
+
+  EXPECT_TRUE(parse(RFC3339_full, "1970-01-01T00:02:07.9+00:00", utc, &stp));
+  EXPECT_EQ(stp, time_point<DS>::max());
+  EXPECT_EQ("1970-01-01T00:02:07+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, stp, utc));
+  EXPECT_FALSE(parse(RFC3339_full, "1970-01-01T00:02:08+00:00", utc, &stp));
+
+  EXPECT_TRUE(parse(RFC3339_full, "1969-12-31T23:57:52+00:00", utc, &stp));
+  EXPECT_EQ(stp, time_point<DS>::min());
+  EXPECT_EQ("1969-12-31T23:57:52+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, stp, utc));
+  EXPECT_FALSE(parse(RFC3339_full, "1969-12-31T23:57:51.9+00:00", utc, &stp));
+
+  using DM = chrono::duration<std::int8_t, chrono::minutes::period>;
+  time_point<DM> mtp;
+
+  EXPECT_TRUE(parse(RFC3339_full, "1970-01-01T02:07:59+00:00", utc, &mtp));
+  EXPECT_EQ(mtp, time_point<DM>::max());
+  EXPECT_EQ("1970-01-01T02:07:00+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, mtp, utc));
+  EXPECT_FALSE(parse(RFC3339_full, "1970-01-01T02:08:00+00:00", utc, &mtp));
+
+  EXPECT_TRUE(parse(RFC3339_full, "1969-12-31T21:52:00+00:00", utc, &mtp));
+  EXPECT_EQ(mtp, time_point<DM>::min());
+  EXPECT_EQ("1969-12-31T21:52:00+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, mtp, utc));
+  EXPECT_FALSE(parse(RFC3339_full, "1969-12-31T21:51:59+00:00", utc, &mtp));
+}
+
+TEST(Parse, TimePointOverflowFloor) {
+  const time_zone utc = utc_time_zone();
+
+  using D = chrono::duration<std::int64_t, std::micro>;
+  time_point<D> tp;
+
+  EXPECT_TRUE(
+      parse(RFC3339_full, "294247-01-10T04:00:54.7758079+00:00", utc, &tp));
+  EXPECT_EQ(tp, time_point<D>::max());
+  EXPECT_EQ("294247-01-10T04:00:54.775807+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, utc));
+#if 0
+  // TODO(#199): Will fail until cctz::parse() properly detects overflow.
+  EXPECT_FALSE(
+      parse(RFC3339_full, "294247-01-10T04:00:54.7758080+00:00", utc, &tp));
+  EXPECT_TRUE(
+      parse(RFC3339_full, "-290308-12-21T19:59:05.2241920+00:00", utc, &tp));
+  EXPECT_EQ(tp, time_point<D>::min());
+  EXPECT_EQ("-290308-12-21T19:59:05.224192+00:00",
+            absl::time_internal::cctz::format(RFC3339_full, tp, utc));
+  EXPECT_FALSE(
+      parse(RFC3339_full, "-290308-12-21T19:59:05.2241919+00:00", utc, &tp));
+#endif
 }
 
 //
@@ -1544,7 +1711,8 @@
   // RFC3339, which renders subseconds.
   {
     time_point<chrono::nanoseconds> out;
-    const std::string s = format(RFC3339_full, in + subseconds, lax);
+    const std::string s =
+        absl::time_internal::cctz::format(RFC3339_full, in + subseconds, lax);
     EXPECT_TRUE(parse(RFC3339_full, s, lax, &out)) << s;
     EXPECT_EQ(in + subseconds, out);  // RFC3339_full includes %Ez
   }
@@ -1552,7 +1720,8 @@
   // RFC1123, which only does whole seconds.
   {
     time_point<chrono::nanoseconds> out;
-    const std::string s = format(RFC1123_full, in, lax);
+    const std::string s =
+        absl::time_internal::cctz::format(RFC1123_full, in, lax);
     EXPECT_TRUE(parse(RFC1123_full, s, lax, &out)) << s;
     EXPECT_EQ(in, out);  // RFC1123_full includes %z
   }
@@ -1570,7 +1739,7 @@
   {
     time_point<chrono::nanoseconds> out;
     time_zone utc = utc_time_zone();
-    const std::string s = format("%c", in, utc);
+    const std::string s = absl::time_internal::cctz::format("%c", in, utc);
     EXPECT_TRUE(parse("%c", s, utc, &out)) << s;
     EXPECT_EQ(in, out);
   }
@@ -1581,7 +1750,8 @@
   const time_zone utc = utc_time_zone();
   const time_point<absl::time_internal::cctz::seconds> in =
       time_point<absl::time_internal::cctz::seconds>::max();
-  const std::string s = format(RFC3339_full, in, utc);
+  const std::string s =
+      absl::time_internal::cctz::format(RFC3339_full, in, utc);
   time_point<absl::time_internal::cctz::seconds> out;
   EXPECT_TRUE(parse(RFC3339_full, s, utc, &out)) << s;
   EXPECT_EQ(in, out);
@@ -1591,7 +1761,8 @@
   const time_zone utc = utc_time_zone();
   const time_point<absl::time_internal::cctz::seconds> in =
       time_point<absl::time_internal::cctz::seconds>::min();
-  const std::string s = format(RFC3339_full, in, utc);
+  const std::string s =
+      absl::time_internal::cctz::format(RFC3339_full, in, utc);
   time_point<absl::time_internal::cctz::seconds> out;
   EXPECT_TRUE(parse(RFC3339_full, s, utc, &out)) << s;
   EXPECT_EQ(in, out);
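Most of the churn in this test is mechanical: unqualified format(...) calls, previously found through argument-dependent lookup, are now spelled absl::time_internal::cctz::format(...), presumably so the tests stay unambiguous when other format overloads (for example C++20's std::format) are in scope. A short sketch of the qualified-call style against the public cctz header (the helper name is made up):

#include <chrono>
#include <cstdint>
#include <string>

#include "absl/time/internal/cctz/include/cctz/time_zone.h"

namespace cctz = absl::time_internal::cctz;

// Formats a Unix timestamp (in seconds) as UTC, naming cctz::format fully
// rather than relying on ADL to find it.
std::string FormatUnixSeconds(std::int64_t unix_seconds) {
  const cctz::time_zone utc = cctz::utc_time_zone();
  const auto tp = std::chrono::system_clock::from_time_t(0) +
                  std::chrono::seconds(unix_seconds);
  return cctz::format("%Y-%m-%d %H:%M:%S", tp, utc);
}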
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc
index 0319b2f..0e65cd9 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc
@@ -23,17 +23,19 @@
 namespace time_internal {
 namespace cctz {
 
-std::unique_ptr<TimeZoneIf> TimeZoneIf::Load(const std::string& name) {
+std::unique_ptr<TimeZoneIf> TimeZoneIf::UTC() { return TimeZoneInfo::UTC(); }
+
+std::unique_ptr<TimeZoneIf> TimeZoneIf::Make(const std::string& name) {
   // Support "libc:localtime" and "libc:*" to access the legacy
   // localtime and UTC support respectively from the C library.
+  // NOTE: The "libc:*" zones are internal, test-only interfaces, and
+  // are subject to change/removal without notice. Do not use them.
   if (name.compare(0, 5, "libc:") == 0) {
-    return std::unique_ptr<TimeZoneIf>(new TimeZoneLibC(name.substr(5)));
+    return TimeZoneLibC::Make(name.substr(5));
   }
 
-  // Otherwise use the "zoneinfo" implementation by default.
-  std::unique_ptr<TimeZoneInfo> tz(new TimeZoneInfo);
-  if (!tz->Load(name)) tz.reset();
-  return std::unique_ptr<TimeZoneIf>(tz.release());
+  // Otherwise use the "zoneinfo" implementation.
+  return TimeZoneInfo::Make(name);
 }
 
 // Defined out-of-line to avoid emitting a weak vtable in all TUs.
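Callers never construct TimeZoneIf directly; the public entry points remain cctz::load_time_zone(), which reports failure through its bool result (now backed by Make()), and cctz::utc_time_zone(), which, like the new UTC() factory, cannot fail. A short usage sketch against the public header:

#include <iostream>
#include <string>

#include "absl/time/internal/cctz/include/cctz/time_zone.h"

namespace cctz = absl::time_internal::cctz;

int main() {
  cctz::time_zone tz;
  if (!cctz::load_time_zone("America/Los_Angeles", &tz)) {
    // Loading can fail (e.g. missing zoneinfo data); fall back to UTC,
    // which is always available.
    tz = cctz::utc_time_zone();
  }
  std::cout << tz.name() << "\n";
  return 0;
}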
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h b/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h
index 32c0891..bec9beb 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h
@@ -33,8 +33,9 @@
 // Subclasses implement the functions for civil-time conversions in the zone.
 class TimeZoneIf {
  public:
-  // A factory function for TimeZoneIf implementations.
-  static std::unique_ptr<TimeZoneIf> Load(const std::string& name);
+  // Factory functions for TimeZoneIf implementations.
+  static std::unique_ptr<TimeZoneIf> UTC();  // never fails
+  static std::unique_ptr<TimeZoneIf> Make(const std::string& name);
 
   virtual ~TimeZoneIf();
 
@@ -51,12 +52,15 @@
   virtual std::string Description() const = 0;
 
  protected:
-  TimeZoneIf() {}
+  TimeZoneIf() = default;
+  TimeZoneIf(const TimeZoneIf&) = delete;
+  TimeZoneIf& operator=(const TimeZoneIf&) = delete;
 };
 
 // Convert between time_point<seconds> and a count of seconds since the
 // Unix epoch.  We assume that the std::chrono::system_clock and the
-// Unix clock are second aligned, but not that they share an epoch.
+// Unix clock are second aligned, and that the results are representable.
+// (That is, that they share an epoch, which is required since C++20.)
 inline std::int_fast64_t ToUnixSeconds(const time_point<seconds>& tp) {
   return (tp - std::chrono::time_point_cast<seconds>(
                    std::chrono::system_clock::from_time_t(0)))
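The revised comment relies on std::chrono::system_clock counting time since the Unix epoch (guaranteed since C++20, and true in practice on the supported toolchains). A standalone round-trip sketch mirroring ToUnixSeconds()/FromUnixSeconds(); the type aliases are local to the sketch:

#include <cassert>
#include <chrono>
#include <cstdint>

using cctz_seconds = std::chrono::duration<std::int_fast64_t>;
using sec_point =
    std::chrono::time_point<std::chrono::system_clock, cctz_seconds>;

std::int_fast64_t ToUnixSeconds(const sec_point& tp) {
  return (tp - std::chrono::time_point_cast<cctz_seconds>(
                   std::chrono::system_clock::from_time_t(0)))
      .count();
}

sec_point FromUnixSeconds(std::int_fast64_t t) {
  return std::chrono::time_point_cast<cctz_seconds>(
             std::chrono::system_clock::from_time_t(0)) +
         cctz_seconds(t);
}

int main() {
  // Round-tripping a count of seconds is lossless at second resolution.
  assert(ToUnixSeconds(FromUnixSeconds(1395024427)) == 1395024427);
  return 0;
}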
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc
index f34e3ae..aadbb77 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc
@@ -99,11 +99,13 @@
   }
 }
 
+time_zone::Impl::Impl() : name_("UTC"), zone_(TimeZoneIf::UTC()) {}
+
 time_zone::Impl::Impl(const std::string& name)
-    : name_(name), zone_(TimeZoneIf::Load(name_)) {}
+    : name_(name), zone_(TimeZoneIf::Make(name_)) {}
 
 const time_zone::Impl* time_zone::Impl::UTCImpl() {
-  static const Impl* utc_impl = new Impl("UTC");  // never fails
+  static const Impl* utc_impl = new Impl;
   return utc_impl;
 }
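The UTC singleton now uses a dedicated default constructor backed by TimeZoneIf::UTC() instead of going through the by-name lookup, and UTCImpl() keeps the pattern of a function-local static pointer: initialized once (thread-safe since C++11) and intentionally never deleted, so it stays valid during program shutdown. A standalone sketch of that pattern, with a hypothetical Config class standing in for time_zone::Impl:

#include <iostream>
#include <string>

class Config {
 public:
  // Returns a process-wide default instance, created on first use and
  // intentionally leaked, mirroring UTCImpl() above.
  static const Config* Default() {
    static const Config* config = new Config;  // never deleted
    return config;
  }
  const std::string& name() const { return name_; }

 private:
  Config() : name_("UTC") {}
  Config(const Config&) = delete;
  Config& operator=(const Config&) = delete;

  const std::string name_;
};

int main() {
  std::cout << Config::Default()->name() << "\n";
  return 0;
}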
 
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h b/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h
index 7d747ba..8308a3b 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h
@@ -78,7 +78,11 @@
   std::string Description() const { return zone_->Description(); }
 
  private:
+  Impl();
   explicit Impl(const std::string& name);
+  Impl(const Impl&) = delete;
+  Impl& operator=(const Impl&) = delete;
+
   static const Impl* UTCImpl();
 
   const std::string name_;
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc
index 8039353..f46198f 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc
@@ -39,10 +39,13 @@
 #include <cstdio>
 #include <cstdlib>
 #include <cstring>
+#include <fstream>
 #include <functional>
 #include <memory>
 #include <sstream>
 #include <string>
+#include <utility>
+#include <vector>
 
 #include "absl/base/config.h"
 #include "absl/time/internal/cctz/include/cctz/civil_time.h"
@@ -132,6 +135,64 @@
   return static_cast<std::int_fast64_t>(v - s64maxU - 1) - s64max - 1;
 }
 
+struct Header {            // counts of:
+  std::size_t timecnt;     // transition times
+  std::size_t typecnt;     // transition types
+  std::size_t charcnt;     // zone abbreviation characters
+  std::size_t leapcnt;     // leap seconds (we expect none)
+  std::size_t ttisstdcnt;  // standard/wall indicators (unused)
+  std::size_t ttisutcnt;   // UT/local indicators (unused)
+
+  bool Build(const tzhead& tzh);
+  std::size_t DataLength(std::size_t time_len) const;
+};
+
+// Builds the in-memory header using the raw bytes from the file.
+bool Header::Build(const tzhead& tzh) {
+  std::int_fast32_t v;
+  if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
+  timecnt = static_cast<std::size_t>(v);
+  if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
+  typecnt = static_cast<std::size_t>(v);
+  if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
+  charcnt = static_cast<std::size_t>(v);
+  if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
+  leapcnt = static_cast<std::size_t>(v);
+  if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
+  ttisstdcnt = static_cast<std::size_t>(v);
+  if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
+  ttisutcnt = static_cast<std::size_t>(v);
+  return true;
+}
+
+// How many bytes of data are associated with this header. The result
+// depends upon whether this is a section with 4-byte or 8-byte times.
+std::size_t Header::DataLength(std::size_t time_len) const {
+  std::size_t len = 0;
+  len += (time_len + 1) * timecnt;  // unix_time + type_index
+  len += (4 + 1 + 1) * typecnt;     // utc_offset + is_dst + abbr_index
+  len += 1 * charcnt;               // abbreviations
+  len += (time_len + 4) * leapcnt;  // leap-time + TAI-UTC
+  len += 1 * ttisstdcnt;            // standard/wall indicators
+  len += 1 * ttisutcnt;             // UT/local indicators
+  return len;
+}
+
+// Does the rule for future transitions call for year-round daylight time?
+// See tz/zic.c:stringzone() for the details on how such rules are encoded.
+bool AllYearDST(const PosixTimeZone& posix) {
+  if (posix.dst_start.date.fmt != PosixTransition::N) return false;
+  if (posix.dst_start.date.n.day != 0) return false;
+  if (posix.dst_start.time.offset != 0) return false;
+
+  if (posix.dst_end.date.fmt != PosixTransition::J) return false;
+  if (posix.dst_end.date.j.day != kDaysPerYear[0]) return false;
+  const auto offset = posix.std_offset - posix.dst_offset;
+  if (posix.dst_end.time.offset + offset != kSecsPerDay) return false;
+
+  return true;
+}
+
 // Generate a year-relative offset for a PosixTransition.
 std::int_fast64_t TransOffset(bool leap_year, int jan1_weekday,
                               const PosixTransition& pt) {
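For a quick check of the byte math in Header::DataLength(), here is the same arithmetic restated as a free function with invented sample counts (a worked example only, not part of the library):

#include <cassert>
#include <cstddef>

std::size_t DataLength(std::size_t time_len, std::size_t timecnt,
                       std::size_t typecnt, std::size_t charcnt,
                       std::size_t leapcnt, std::size_t ttisstdcnt,
                       std::size_t ttisutcnt) {
  std::size_t len = 0;
  len += (time_len + 1) * timecnt;  // unix_time + type_index
  len += (4 + 1 + 1) * typecnt;     // utc_offset + is_dst + abbr_index
  len += 1 * charcnt;               // abbreviations
  len += (time_len + 4) * leapcnt;  // leap-time + TAI-UTC
  len += 1 * ttisstdcnt;            // standard/wall indicators
  len += 1 * ttisutcnt;             // UT/local indicators
  return len;
}

int main() {
  // A TZif section with 8-byte times, 3 transitions, 2 types, 8 abbreviation
  // bytes, no leap seconds, and both one-byte indicator arrays present:
  // 9*3 + 6*2 + 8 + 0 + 2 + 2 = 51 bytes follow the header.
  assert(DataLength(8, 3, 2, 8, 0, 2, 2) == 51);
  return 0;
}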
@@ -200,6 +261,321 @@
 
 }  // namespace
 
+// Find/make a transition type with these attributes.
+bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
+                                     const std::string& abbr,
+                                     std::uint_least8_t* index) {
+  std::size_t type_index = 0;
+  std::size_t abbr_index = abbreviations_.size();
+  for (; type_index != transition_types_.size(); ++type_index) {
+    const TransitionType& tt(transition_types_[type_index]);
+    const char* tt_abbr = &abbreviations_[tt.abbr_index];
+    if (tt_abbr == abbr) abbr_index = tt.abbr_index;
+    if (tt.utc_offset == utc_offset && tt.is_dst == is_dst) {
+      if (abbr_index == tt.abbr_index) break;  // reuse
+    }
+  }
+  if (type_index > 255 || abbr_index > 255) {
+    // No index space (8 bits) available for a new type or abbreviation.
+    return false;
+  }
+  if (type_index == transition_types_.size()) {
+    TransitionType& tt(*transition_types_.emplace(transition_types_.end()));
+    tt.utc_offset = static_cast<std::int_least32_t>(utc_offset);
+    tt.is_dst = is_dst;
+    if (abbr_index == abbreviations_.size()) {
+      abbreviations_.append(abbr);
+      abbreviations_.append(1, '\0');
+    }
+    tt.abbr_index = static_cast<std::uint_least8_t>(abbr_index);
+  }
+  *index = static_cast<std::uint_least8_t>(type_index);
+  return true;
+}
+
+// zic(8) can generate no-op transitions when a zone changes rules at an
+// instant when there is actually no discontinuity.  So we check whether
+// two transitions have equivalent types (same offset/is_dst/abbr).
+bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
+                                    std::uint_fast8_t tt2_index) const {
+  if (tt1_index == tt2_index) return true;
+  const TransitionType& tt1(transition_types_[tt1_index]);
+  const TransitionType& tt2(transition_types_[tt2_index]);
+  if (tt1.utc_offset != tt2.utc_offset) return false;
+  if (tt1.is_dst != tt2.is_dst) return false;
+  if (tt1.abbr_index != tt2.abbr_index) return false;
+  return true;
+}
+
+// Use the POSIX-TZ-environment-variable-style string to handle times
+// in years after the last transition stored in the zoneinfo data.
+bool TimeZoneInfo::ExtendTransitions() {
+  extended_ = false;
+  if (future_spec_.empty()) return true;  // last transition prevails
+
+  PosixTimeZone posix;
+  if (!ParsePosixSpec(future_spec_, &posix)) return false;
+
+  // Find transition type for the future std specification.
+  std::uint_least8_t std_ti;
+  if (!GetTransitionType(posix.std_offset, false, posix.std_abbr, &std_ti))
+    return false;
+
+  if (posix.dst_abbr.empty()) {  // std only
+    // The future specification should match the last transition, and
+    // that means that handling the future will fall out naturally.
+    return EquivTransitions(transitions_.back().type_index, std_ti);
+  }
+
+  // Find transition type for the future dst specification.
+  std::uint_least8_t dst_ti;
+  if (!GetTransitionType(posix.dst_offset, true, posix.dst_abbr, &dst_ti))
+    return false;
+
+  if (AllYearDST(posix)) {  // dst only
+    // The future specification should match the last transition, and
+    // that means that handling the future will fall out naturally.
+    return EquivTransitions(transitions_.back().type_index, dst_ti);
+  }
+
+  // Extend the transitions for an additional 401 years using the future
+  // specification. Years beyond those can be handled by mapping back to
+  // a cycle-equivalent year within that range. Note that we need 401
+  // (well, at least the first transition in the 401st year) so that the
+  // end of the 400th year is mapped back to an extended year. And first
+  // we may also need two additional transitions for the current year.
+  transitions_.reserve(transitions_.size() + 2 + 401 * 2);
+  extended_ = true;
+
+  const Transition& last(transitions_.back());
+  const std::int_fast64_t last_time = last.unix_time;
+  const TransitionType& last_tt(transition_types_[last.type_index]);
+  last_year_ = LocalTime(last_time, last_tt).cs.year();
+  bool leap_year = IsLeap(last_year_);
+  const civil_second jan1(last_year_);
+  std::int_fast64_t jan1_time = jan1 - civil_second();
+  int jan1_weekday = ToPosixWeekday(get_weekday(jan1));
+
+  Transition dst = {0, dst_ti, civil_second(), civil_second()};
+  Transition std = {0, std_ti, civil_second(), civil_second()};
+  for (const year_t limit = last_year_ + 401;; ++last_year_) {
+    auto dst_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_start);
+    auto std_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_end);
+    dst.unix_time = jan1_time + dst_trans_off - posix.std_offset;
+    std.unix_time = jan1_time + std_trans_off - posix.dst_offset;
+    const auto* ta = dst.unix_time < std.unix_time ? &dst : &std;
+    const auto* tb = dst.unix_time < std.unix_time ? &std : &dst;
+    if (last_time < tb->unix_time) {
+      if (last_time < ta->unix_time) transitions_.push_back(*ta);
+      transitions_.push_back(*tb);
+    }
+    if (last_year_ == limit) break;
+    jan1_time += kSecsPerYear[leap_year];
+    jan1_weekday = (jan1_weekday + kDaysPerYear[leap_year]) % 7;
+    leap_year = !leap_year && IsLeap(last_year_ + 1);
+  }
+
+  return true;
+}
+
+namespace {
+
+using FilePtr = std::unique_ptr<FILE, int (*)(FILE*)>;
+
+// fopen(3) adaptor.
+inline FilePtr FOpen(const char* path, const char* mode) {
+#if defined(_MSC_VER)
+  FILE* fp;
+  if (fopen_s(&fp, path, mode) != 0) fp = nullptr;
+  return FilePtr(fp, fclose);
+#else
+  // TODO: Enable the close-on-exec flag.
+  return FilePtr(fopen(path, mode), fclose);
+#endif
+}
+
+// A stdio(3)-backed implementation of ZoneInfoSource.
+class FileZoneInfoSource : public ZoneInfoSource {
+ public:
+  static std::unique_ptr<ZoneInfoSource> Open(const std::string& name);
+
+  std::size_t Read(void* ptr, std::size_t size) override {
+    size = std::min(size, len_);
+    std::size_t nread = fread(ptr, 1, size, fp_.get());
+    len_ -= nread;
+    return nread;
+  }
+  int Skip(std::size_t offset) override {
+    offset = std::min(offset, len_);
+    int rc = fseek(fp_.get(), static_cast<long>(offset), SEEK_CUR);
+    if (rc == 0) len_ -= offset;
+    return rc;
+  }
+  std::string Version() const override {
+    // TODO: It would be nice if the zoneinfo data included the tzdb version.
+    return std::string();
+  }
+
+ protected:
+  explicit FileZoneInfoSource(
+      FilePtr fp, std::size_t len = std::numeric_limits<std::size_t>::max())
+      : fp_(std::move(fp)), len_(len) {}
+
+ private:
+  FilePtr fp_;
+  std::size_t len_;
+};
+
+std::unique_ptr<ZoneInfoSource> FileZoneInfoSource::Open(
+    const std::string& name) {
+  // Use of the "file:" prefix is intended for testing purposes only.
+  const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
+
+  // Map the time-zone name to a path name.
+  std::string path;
+  if (pos == name.size() || name[pos] != '/') {
+    const char* tzdir = "/usr/share/zoneinfo";
+    char* tzdir_env = nullptr;
+#if defined(_MSC_VER)
+    _dupenv_s(&tzdir_env, nullptr, "TZDIR");
+#else
+    tzdir_env = std::getenv("TZDIR");
+#endif
+    if (tzdir_env && *tzdir_env) tzdir = tzdir_env;
+    path += tzdir;
+    path += '/';
+#if defined(_MSC_VER)
+    free(tzdir_env);
+#endif
+  }
+  path.append(name, pos, std::string::npos);
+
+  // Open the zoneinfo file.
+  auto fp = FOpen(path.c_str(), "rb");
+  if (fp == nullptr) return nullptr;
+  return std::unique_ptr<ZoneInfoSource>(new FileZoneInfoSource(std::move(fp)));
+}
+
+class AndroidZoneInfoSource : public FileZoneInfoSource {
+ public:
+  static std::unique_ptr<ZoneInfoSource> Open(const std::string& name);
+  std::string Version() const override { return version_; }
+
+ private:
+  explicit AndroidZoneInfoSource(FilePtr fp, std::size_t len,
+                                 std::string version)
+      : FileZoneInfoSource(std::move(fp), len), version_(std::move(version)) {}
+  std::string version_;
+};
+
+std::unique_ptr<ZoneInfoSource> AndroidZoneInfoSource::Open(
+    const std::string& name) {
+  // Use of the "file:" prefix is intended for testing purposes only.
+  const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
+
+  // See Android's libc/tzcode/bionic.cpp for additional information.
+  for (const char* tzdata : {"/data/misc/zoneinfo/current/tzdata",
+                             "/system/usr/share/zoneinfo/tzdata"}) {
+    auto fp = FOpen(tzdata, "rb");
+    if (fp == nullptr) continue;
+
+    char hbuf[24];  // covers header.zonetab_offset too
+    if (fread(hbuf, 1, sizeof(hbuf), fp.get()) != sizeof(hbuf)) continue;
+    if (strncmp(hbuf, "tzdata", 6) != 0) continue;
+    const char* vers = (hbuf[11] == '\0') ? hbuf + 6 : "";
+    const std::int_fast32_t index_offset = Decode32(hbuf + 12);
+    const std::int_fast32_t data_offset = Decode32(hbuf + 16);
+    if (index_offset < 0 || data_offset < index_offset) continue;
+    if (fseek(fp.get(), static_cast<long>(index_offset), SEEK_SET) != 0)
+      continue;
+
+    char ebuf[52];  // covers entry.unused too
+    const std::size_t index_size =
+        static_cast<std::size_t>(data_offset - index_offset);
+    const std::size_t zonecnt = index_size / sizeof(ebuf);
+    if (zonecnt * sizeof(ebuf) != index_size) continue;
+    for (std::size_t i = 0; i != zonecnt; ++i) {
+      if (fread(ebuf, 1, sizeof(ebuf), fp.get()) != sizeof(ebuf)) break;
+      const std::int_fast32_t start = data_offset + Decode32(ebuf + 40);
+      const std::int_fast32_t length = Decode32(ebuf + 44);
+      if (start < 0 || length < 0) break;
+      ebuf[40] = '\0';  // ensure zone name is NUL terminated
+      if (strcmp(name.c_str() + pos, ebuf) == 0) {
+        if (fseek(fp.get(), static_cast<long>(start), SEEK_SET) != 0) break;
+        return std::unique_ptr<ZoneInfoSource>(new AndroidZoneInfoSource(
+            std::move(fp), static_cast<std::size_t>(length), vers));
+      }
+    }
+  }
+
+  return nullptr;
+}
+
+// A zoneinfo source for use inside Fuchsia components. This attempts to
+// read zoneinfo files from one of several known paths in a component's
+// incoming namespace. [Config data][1] is preferred, but package-specific
+// resources are also supported.
+//
+// Fuchsia's implementation supports `FileZoneInfoSource::Version()`.
+//
+// [1]:
+// https://fuchsia.dev/fuchsia-src/development/components/data#using_config_data_in_your_component
+class FuchsiaZoneInfoSource : public FileZoneInfoSource {
+ public:
+  static std::unique_ptr<ZoneInfoSource> Open(const std::string& name);
+  std::string Version() const override { return version_; }
+
+ private:
+  explicit FuchsiaZoneInfoSource(FilePtr fp, std::string version)
+      : FileZoneInfoSource(std::move(fp)), version_(std::move(version)) {}
+  std::string version_;
+};
+
+std::unique_ptr<ZoneInfoSource> FuchsiaZoneInfoSource::Open(
+    const std::string& name) {
+  // Use of the "file:" prefix is intended for testing purposes only.
+  const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
+
+  // Prefixes where a Fuchsia component might find zoneinfo files,
+  // in descending order of preference.
+  const auto kTzdataPrefixes = {
+      "/config/data/tzdata/",
+      "/pkg/data/tzdata/",
+      "/data/tzdata/",
+  };
+  const auto kEmptyPrefix = {""};
+  const bool name_absolute = (pos != name.size() && name[pos] == '/');
+  const auto prefixes = name_absolute ? kEmptyPrefix : kTzdataPrefixes;
+
+  // Fuchsia builds place zoneinfo files at "<prefix><format><name>".
+  for (const std::string prefix : prefixes) {
+    std::string path = prefix;
+    if (!prefix.empty()) path += "zoneinfo/tzif2/";  // format
+    path.append(name, pos, std::string::npos);
+
+    auto fp = FOpen(path.c_str(), "rb");
+    if (fp == nullptr) continue;
+
+    std::string version;
+    if (!prefix.empty()) {
+      // Fuchsia builds place the version in "<prefix>revision.txt".
+      std::ifstream version_stream(prefix + "revision.txt");
+      if (version_stream.is_open()) {
+        // revision.txt should contain no newlines, but to be
+        // defensive we read just the first line.
+        std::getline(version_stream, version);
+      }
+    }
+
+    return std::unique_ptr<ZoneInfoSource>(
+        new FuchsiaZoneInfoSource(std::move(fp), std::move(version)));
+  }
+
+  return nullptr;
+}
+
+}  // namespace
+
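In the loop above, the candidate paths expand to "<prefix>zoneinfo/tzif2/<name>", with the tzdb version read from "<prefix>revision.txt". A standalone sketch of that lookup order (illustrative only, not part of the patch; the zone name is an arbitrary example) could look like:

// Sketch only: probe the same prefixes, in the same preference order,
// as FuchsiaZoneInfoSource::Open() above.
#include <fstream>
#include <iostream>
#include <string>

int main() {
  const std::string name = "America/New_York";  // example zone name
  for (const std::string prefix :
       {"/config/data/tzdata/", "/pkg/data/tzdata/", "/data/tzdata/"}) {
    const std::string path = prefix + "zoneinfo/tzif2/" + name;
    std::ifstream zone(path, std::ios::binary);
    if (!zone.is_open()) continue;
    std::string version;  // first line of "<prefix>revision.txt", if present
    std::ifstream version_stream(prefix + "revision.txt");
    if (version_stream.is_open()) std::getline(version_stream, version);
    std::cout << "found " << path << " (tzdb " << version << ")\n";
    return 0;
  }
  return 1;  // no candidate matched
}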
 // What (no leap-seconds) UTC+seconds zoneinfo would look like.
 bool TimeZoneInfo::ResetToBuiltinUTC(const seconds& offset) {
   transition_types_.resize(1);
@@ -247,146 +623,6 @@
   return true;
 }
 
-// Builds the in-memory header using the raw bytes from the file.
-bool TimeZoneInfo::Header::Build(const tzhead& tzh) {
-  std::int_fast32_t v;
-  if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
-  timecnt = static_cast<std::size_t>(v);
-  if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
-  typecnt = static_cast<std::size_t>(v);
-  if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
-  charcnt = static_cast<std::size_t>(v);
-  if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
-  leapcnt = static_cast<std::size_t>(v);
-  if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
-  ttisstdcnt = static_cast<std::size_t>(v);
-  if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
-  ttisutcnt = static_cast<std::size_t>(v);
-  return true;
-}
-
-// How many bytes of data are associated with this header. The result
-// depends upon whether this is a section with 4-byte or 8-byte times.
-std::size_t TimeZoneInfo::Header::DataLength(std::size_t time_len) const {
-  std::size_t len = 0;
-  len += (time_len + 1) * timecnt;  // unix_time + type_index
-  len += (4 + 1 + 1) * typecnt;     // utc_offset + is_dst + abbr_index
-  len += 1 * charcnt;               // abbreviations
-  len += (time_len + 4) * leapcnt;  // leap-time + TAI-UTC
-  len += 1 * ttisstdcnt;            // UTC/local indicators
-  len += 1 * ttisutcnt;             // standard/wall indicators
-  return len;
-}
-
-// zic(8) can generate no-op transitions when a zone changes rules at an
-// instant when there is actually no discontinuity.  So we check whether
-// two transitions have equivalent types (same offset/is_dst/abbr).
-bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
-                                    std::uint_fast8_t tt2_index) const {
-  if (tt1_index == tt2_index) return true;
-  const TransitionType& tt1(transition_types_[tt1_index]);
-  const TransitionType& tt2(transition_types_[tt2_index]);
-  if (tt1.utc_offset != tt2.utc_offset) return false;
-  if (tt1.is_dst != tt2.is_dst) return false;
-  if (tt1.abbr_index != tt2.abbr_index) return false;
-  return true;
-}
-
-// Find/make a transition type with these attributes.
-bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
-                                     const std::string& abbr,
-                                     std::uint_least8_t* index) {
-  std::size_t type_index = 0;
-  std::size_t abbr_index = abbreviations_.size();
-  for (; type_index != transition_types_.size(); ++type_index) {
-    const TransitionType& tt(transition_types_[type_index]);
-    const char* tt_abbr = &abbreviations_[tt.abbr_index];
-    if (tt_abbr == abbr) abbr_index = tt.abbr_index;
-    if (tt.utc_offset == utc_offset && tt.is_dst == is_dst) {
-      if (abbr_index == tt.abbr_index) break;  // reuse
-    }
-  }
-  if (type_index > 255 || abbr_index > 255) {
-    // No index space (8 bits) available for a new type or abbreviation.
-    return false;
-  }
-  if (type_index == transition_types_.size()) {
-    TransitionType& tt(*transition_types_.emplace(transition_types_.end()));
-    tt.utc_offset = static_cast<std::int_least32_t>(utc_offset);
-    tt.is_dst = is_dst;
-    if (abbr_index == abbreviations_.size()) {
-      abbreviations_.append(abbr);
-      abbreviations_.append(1, '\0');
-    }
-    tt.abbr_index = static_cast<std::uint_least8_t>(abbr_index);
-  }
-  *index = static_cast<std::uint_least8_t>(type_index);
-  return true;
-}
-
-// Use the POSIX-TZ-environment-variable-style string to handle times
-// in years after the last transition stored in the zoneinfo data.
-bool TimeZoneInfo::ExtendTransitions() {
-  extended_ = false;
-  if (future_spec_.empty()) return true;  // last transition prevails
-
-  PosixTimeZone posix;
-  if (!ParsePosixSpec(future_spec_, &posix)) return false;
-
-  // Find transition type for the future std specification.
-  std::uint_least8_t std_ti;
-  if (!GetTransitionType(posix.std_offset, false, posix.std_abbr, &std_ti))
-    return false;
-
-  if (posix.dst_abbr.empty()) {  // std only
-    // The future specification should match the last transition, and
-    // that means that handling the future will fall out naturally.
-    return EquivTransitions(transitions_.back().type_index, std_ti);
-  }
-
-  // Find transition type for the future dst specification.
-  std::uint_least8_t dst_ti;
-  if (!GetTransitionType(posix.dst_offset, true, posix.dst_abbr, &dst_ti))
-    return false;
-
-  // Extend the transitions for an additional 400 years using the
-  // future specification. Years beyond those can be handled by
-  // mapping back to a cycle-equivalent year within that range.
-  // We may need two additional transitions for the current year.
-  transitions_.reserve(transitions_.size() + 400 * 2 + 2);
-  extended_ = true;
-
-  const Transition& last(transitions_.back());
-  const std::int_fast64_t last_time = last.unix_time;
-  const TransitionType& last_tt(transition_types_[last.type_index]);
-  last_year_ = LocalTime(last_time, last_tt).cs.year();
-  bool leap_year = IsLeap(last_year_);
-  const civil_second jan1(last_year_);
-  std::int_fast64_t jan1_time = jan1 - civil_second();
-  int jan1_weekday = ToPosixWeekday(get_weekday(jan1));
-
-  Transition dst = {0, dst_ti, civil_second(), civil_second()};
-  Transition std = {0, std_ti, civil_second(), civil_second()};
-  for (const year_t limit = last_year_ + 400;; ++last_year_) {
-    auto dst_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_start);
-    auto std_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_end);
-    dst.unix_time = jan1_time + dst_trans_off - posix.std_offset;
-    std.unix_time = jan1_time + std_trans_off - posix.dst_offset;
-    const auto* ta = dst.unix_time < std.unix_time ? &dst : &std;
-    const auto* tb = dst.unix_time < std.unix_time ? &std : &dst;
-    if (last_time < tb->unix_time) {
-      if (last_time < ta->unix_time) transitions_.push_back(*ta);
-      transitions_.push_back(*tb);
-    }
-    if (last_year_ == limit) break;
-    jan1_time += kSecsPerYear[leap_year];
-    jan1_weekday = (jan1_weekday + kDaysPerYear[leap_year]) % 7;
-    leap_year = !leap_year && IsLeap(last_year_ + 1);
-  }
-
-  return true;
-}
-
 bool TimeZoneInfo::Load(ZoneInfoSource* zip) {
   // Read and validate the header.
   tzhead tzh;
@@ -479,9 +715,9 @@
   // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when
   // interpreting a POSIX spec that does not include start/end rules, and
   // that isn't the case here (see "zic -p").
-  bp += (8 + 4) * hdr.leapcnt;  // leap-time + TAI-UTC
-  bp += 1 * hdr.ttisstdcnt;     // UTC/local indicators
-  bp += 1 * hdr.ttisutcnt;      // standard/wall indicators
+  bp += (time_len + 4) * hdr.leapcnt;  // leap-time + TAI-UTC
+  bp += 1 * hdr.ttisstdcnt;            // UTC/local indicators
+  bp += 1 * hdr.ttisutcnt;             // standard/wall indicators
   assert(bp == tbuf.data() + tbuf.size());
 
   future_spec_.clear();
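The corrected accounting above charges time_len + 4 bytes per leap-second record, consistent with the DataLength() helper shown removed earlier in this diff; the old "(8 + 4)" only agreed with the 8-byte data section. A tiny standalone check of that arithmetic, with assumed counts, would be:

// Sketch only: mirrors the per-field byte accounting used when skipping
// the leap-second, UTC/local, and standard/wall blocks above.
#include <cstddef>

constexpr std::size_t DataLength(std::size_t time_len, std::size_t timecnt,
                                 std::size_t typecnt, std::size_t charcnt,
                                 std::size_t leapcnt, std::size_t ttisstdcnt,
                                 std::size_t ttisutcnt) {
  return (time_len + 1) * timecnt + (4 + 1 + 1) * typecnt + charcnt +
         (time_len + 4) * leapcnt + ttisstdcnt + ttisutcnt;
}

// 27 leap records in an 8-byte section occupy 27 * 12 bytes...
static_assert(DataLength(8, 0, 0, 0, 27, 0, 0) == 27 * 12, "v2+ leap records");
// ...but only 27 * 8 bytes in a 4-byte (version 1) section, which the old
// "(8 + 4) * leapcnt" would have over-counted.
static_assert(DataLength(4, 0, 0, 0, 27, 0, 0) == 27 * 8, "v1 leap records");

int main() { return 0; }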
@@ -510,8 +746,8 @@
 
   // Trim redundant transitions. zic may have added these to work around
   // differences between the glibc and reference implementations (see
-  // zic.c:dontmerge) and the Qt library (see zic.c:WORK_AROUND_QTBUG_53071).
-  // For us, they just get in the way when we do future_spec_ extension.
+  // zic.c:dontmerge) or to avoid bugs in old readers. For us, they just
+  // get in the way when we do future_spec_ extension.
   while (hdr.timecnt > 1) {
     if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index,
                           transitions_[hdr.timecnt - 2].type_index)) {
@@ -574,145 +810,6 @@
   return true;
 }
 
-namespace {
-
-// fopen(3) adaptor.
-inline FILE* FOpen(const char* path, const char* mode) {
-#if defined(_MSC_VER)
-  FILE* fp;
-  if (fopen_s(&fp, path, mode) != 0) fp = nullptr;
-  return fp;
-#else
-  return fopen(path, mode);  // TODO: Enable the close-on-exec flag.
-#endif
-}
-
-// A stdio(3)-backed implementation of ZoneInfoSource.
-class FileZoneInfoSource : public ZoneInfoSource {
- public:
-  static std::unique_ptr<ZoneInfoSource> Open(const std::string& name);
-
-  std::size_t Read(void* ptr, std::size_t size) override {
-    size = std::min(size, len_);
-    std::size_t nread = fread(ptr, 1, size, fp_.get());
-    len_ -= nread;
-    return nread;
-  }
-  int Skip(std::size_t offset) override {
-    offset = std::min(offset, len_);
-    int rc = fseek(fp_.get(), static_cast<long>(offset), SEEK_CUR);
-    if (rc == 0) len_ -= offset;
-    return rc;
-  }
-  std::string Version() const override {
-    // TODO: It would nice if the zoneinfo data included the tzdb version.
-    return std::string();
-  }
-
- protected:
-  explicit FileZoneInfoSource(
-      FILE* fp, std::size_t len = std::numeric_limits<std::size_t>::max())
-      : fp_(fp, fclose), len_(len) {}
-
- private:
-  std::unique_ptr<FILE, int (*)(FILE*)> fp_;
-  std::size_t len_;
-};
-
-std::unique_ptr<ZoneInfoSource> FileZoneInfoSource::Open(
-    const std::string& name) {
-  // Use of the "file:" prefix is intended for testing purposes only.
-  const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
-
-  // Map the time-zone name to a path name.
-  std::string path;
-  if (pos == name.size() || name[pos] != '/') {
-    const char* tzdir = "/usr/share/zoneinfo";
-    char* tzdir_env = nullptr;
-#if defined(_MSC_VER)
-    _dupenv_s(&tzdir_env, nullptr, "TZDIR");
-#else
-    tzdir_env = std::getenv("TZDIR");
-#endif
-    if (tzdir_env && *tzdir_env) tzdir = tzdir_env;
-    path += tzdir;
-    path += '/';
-#if defined(_MSC_VER)
-    free(tzdir_env);
-#endif
-  }
-  path.append(name, pos, std::string::npos);
-
-  // Open the zoneinfo file.
-  FILE* fp = FOpen(path.c_str(), "rb");
-  if (fp == nullptr) return nullptr;
-  std::size_t length = 0;
-  if (fseek(fp, 0, SEEK_END) == 0) {
-    long offset = ftell(fp);
-    if (offset >= 0) {
-      length = static_cast<std::size_t>(offset);
-    }
-    rewind(fp);
-  }
-  return std::unique_ptr<ZoneInfoSource>(new FileZoneInfoSource(fp, length));
-}
-
-class AndroidZoneInfoSource : public FileZoneInfoSource {
- public:
-  static std::unique_ptr<ZoneInfoSource> Open(const std::string& name);
-  std::string Version() const override { return version_; }
-
- private:
-  explicit AndroidZoneInfoSource(FILE* fp, std::size_t len, const char* vers)
-      : FileZoneInfoSource(fp, len), version_(vers) {}
-  std::string version_;
-};
-
-std::unique_ptr<ZoneInfoSource> AndroidZoneInfoSource::Open(
-    const std::string& name) {
-  // Use of the "file:" prefix is intended for testing purposes only.
-  const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
-
-  // See Android's libc/tzcode/bionic.cpp for additional information.
-  for (const char* tzdata : {"/data/misc/zoneinfo/current/tzdata",
-                             "/system/usr/share/zoneinfo/tzdata"}) {
-    std::unique_ptr<FILE, int (*)(FILE*)> fp(FOpen(tzdata, "rb"), fclose);
-    if (fp.get() == nullptr) continue;
-
-    char hbuf[24];  // covers header.zonetab_offset too
-    if (fread(hbuf, 1, sizeof(hbuf), fp.get()) != sizeof(hbuf)) continue;
-    if (strncmp(hbuf, "tzdata", 6) != 0) continue;
-    const char* vers = (hbuf[11] == '\0') ? hbuf + 6 : "";
-    const std::int_fast32_t index_offset = Decode32(hbuf + 12);
-    const std::int_fast32_t data_offset = Decode32(hbuf + 16);
-    if (index_offset < 0 || data_offset < index_offset) continue;
-    if (fseek(fp.get(), static_cast<long>(index_offset), SEEK_SET) != 0)
-      continue;
-
-    char ebuf[52];  // covers entry.unused too
-    const std::size_t index_size =
-        static_cast<std::size_t>(data_offset - index_offset);
-    const std::size_t zonecnt = index_size / sizeof(ebuf);
-    if (zonecnt * sizeof(ebuf) != index_size) continue;
-    for (std::size_t i = 0; i != zonecnt; ++i) {
-      if (fread(ebuf, 1, sizeof(ebuf), fp.get()) != sizeof(ebuf)) break;
-      const std::int_fast32_t start = data_offset + Decode32(ebuf + 40);
-      const std::int_fast32_t length = Decode32(ebuf + 44);
-      if (start < 0 || length < 0) break;
-      ebuf[40] = '\0';  // ensure zone name is NUL terminated
-      if (strcmp(name.c_str() + pos, ebuf) == 0) {
-        if (fseek(fp.get(), static_cast<long>(start), SEEK_SET) != 0) break;
-        return std::unique_ptr<ZoneInfoSource>(new AndroidZoneInfoSource(
-            fp.release(), static_cast<std::size_t>(length), vers));
-      }
-    }
-  }
-
-  return nullptr;
-}
-
-}  // namespace
-
 bool TimeZoneInfo::Load(const std::string& name) {
   // We can ensure that the loading of UTC or any other fixed-offset
   // zone never fails because the simple, fixed-offset state can be
@@ -728,11 +825,24 @@
       name, [](const std::string& n) -> std::unique_ptr<ZoneInfoSource> {
         if (auto z = FileZoneInfoSource::Open(n)) return z;
         if (auto z = AndroidZoneInfoSource::Open(n)) return z;
+        if (auto z = FuchsiaZoneInfoSource::Open(n)) return z;
         return nullptr;
       });
   return zip != nullptr && Load(zip.get());
 }
 
+std::unique_ptr<TimeZoneInfo> TimeZoneInfo::UTC() {
+  auto tz = std::unique_ptr<TimeZoneInfo>(new TimeZoneInfo);
+  tz->ResetToBuiltinUTC(seconds::zero());
+  return tz;
+}
+
+std::unique_ptr<TimeZoneInfo> TimeZoneInfo::Make(const std::string& name) {
+  auto tz = std::unique_ptr<TimeZoneInfo>(new TimeZoneInfo);
+  if (!tz->Load(name)) tz.reset();  // fallback to UTC
+  return tz;
+}
+
 // BreakTime() translation for a particular transition type.
 time_zone::absolute_lookup TimeZoneInfo::LocalTime(
     std::int_fast64_t unix_time, const TransitionType& tt) const {
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h b/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h
index 2467ff5..689df6f 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h
@@ -18,6 +18,7 @@
 #include <atomic>
 #include <cstddef>
 #include <cstdint>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -64,12 +65,9 @@
 // A time zone backed by the IANA Time Zone Database (zoneinfo).
 class TimeZoneInfo : public TimeZoneIf {
  public:
-  TimeZoneInfo() = default;
-  TimeZoneInfo(const TimeZoneInfo&) = delete;
-  TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
-
-  // Loads the zoneinfo for the given name, returning true if successful.
-  bool Load(const std::string& name);
+  // Factories.
+  static std::unique_ptr<TimeZoneInfo> UTC();  // never fails
+  static std::unique_ptr<TimeZoneInfo> Make(const std::string& name);
 
   // TimeZoneIf implementations.
   time_zone::absolute_lookup BreakTime(
@@ -83,17 +81,9 @@
   std::string Description() const override;
 
  private:
-  struct Header {            // counts of:
-    std::size_t timecnt;     // transition times
-    std::size_t typecnt;     // transition types
-    std::size_t charcnt;     // zone abbreviation characters
-    std::size_t leapcnt;     // leap seconds (we expect none)
-    std::size_t ttisstdcnt;  // UTC/local indicators (unused)
-    std::size_t ttisutcnt;   // standard/wall indicators (unused)
-
-    bool Build(const tzhead& tzh);
-    std::size_t DataLength(std::size_t time_len) const;
-  };
+  TimeZoneInfo() = default;
+  TimeZoneInfo(const TimeZoneInfo&) = delete;
+  TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
 
   bool GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
                          const std::string& abbr, std::uint_least8_t* index);
@@ -102,6 +92,7 @@
   bool ExtendTransitions();
 
   bool ResetToBuiltinUTC(const seconds& offset);
+  bool Load(const std::string& name);
   bool Load(ZoneInfoSource* zip);
 
   // Helpers for BreakTime() and MakeTime().
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
index a14982a..d014612 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
@@ -27,6 +27,12 @@
 #include "absl/time/internal/cctz/include/cctz/civil_time.h"
 #include "absl/time/internal/cctz/include/cctz/time_zone.h"
 
+#if defined(_AIX)
+extern "C" {
+extern long altzone;
+}
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace time_internal {
@@ -44,7 +50,7 @@
   const bool is_dst = tm.tm_isdst > 0;
   return _tzname[is_dst];
 }
-#elif defined(__sun)
+#elif defined(__sun) || defined(_AIX)
 // Uses the globals: 'timezone', 'altzone' and 'tzname'.
 auto tm_gmtoff(const std::tm& tm) -> decltype(timezone) {
   const bool is_dst = tm.tm_isdst > 0;
@@ -56,7 +62,7 @@
 }
 #elif defined(__native_client__) || defined(__myriad2__) || \
     defined(__EMSCRIPTEN__)
-// Uses the globals: 'timezone' and 'tzname'.
+// Uses the globals: '_timezone' and 'tzname'.
 auto tm_gmtoff(const std::tm& tm) -> decltype(_timezone + 0) {
   const bool is_dst = tm.tm_isdst > 0;
   return _timezone + (is_dst ? 60 * 60 : 0);
@@ -65,6 +71,16 @@
   const bool is_dst = tm.tm_isdst > 0;
   return tzname[is_dst];
 }
+#elif defined(__VXWORKS__)
+// Uses the globals: 'timezone' and 'tzname'.
+auto tm_gmtoff(const std::tm& tm) -> decltype(timezone + 0) {
+  const bool is_dst = tm.tm_isdst > 0;
+  return timezone + (is_dst ? 60 * 60 : 0);
+}
+auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
+  const bool is_dst = tm.tm_isdst > 0;
+  return tzname[is_dst];
+}
 #else
 // Adapt to different spellings of the struct std::tm extension fields.
 #if defined(tm_gmtoff)
@@ -102,6 +118,7 @@
 }
 #endif  // tm_zone
 #endif
+using tm_gmtoff_t = decltype(tm_gmtoff(std::tm{}));
 
 inline std::tm* gm_time(const std::time_t* timep, std::tm* result) {
 #if defined(_WIN32) || defined(_WIN64)
@@ -119,37 +136,36 @@
 #endif
 }
 
-// Converts a civil second and "dst" flag into a time_t and UTC offset.
+// Converts a civil second and "dst" flag into a time_t and a struct tm.
 // Returns false if time_t cannot represent the requested civil second.
 // Caller must have already checked that cs.year() will fit into a tm_year.
-bool make_time(const civil_second& cs, int is_dst, std::time_t* t, int* off) {
-  std::tm tm;
-  tm.tm_year = static_cast<int>(cs.year() - year_t{1900});
-  tm.tm_mon = cs.month() - 1;
-  tm.tm_mday = cs.day();
-  tm.tm_hour = cs.hour();
-  tm.tm_min = cs.minute();
-  tm.tm_sec = cs.second();
-  tm.tm_isdst = is_dst;
-  *t = std::mktime(&tm);
+bool make_time(const civil_second& cs, int is_dst, std::time_t* t,
+               std::tm* tm) {
+  tm->tm_year = static_cast<int>(cs.year() - year_t{1900});
+  tm->tm_mon = cs.month() - 1;
+  tm->tm_mday = cs.day();
+  tm->tm_hour = cs.hour();
+  tm->tm_min = cs.minute();
+  tm->tm_sec = cs.second();
+  tm->tm_isdst = is_dst;
+  *t = std::mktime(tm);
   if (*t == std::time_t{-1}) {
     std::tm tm2;
     const std::tm* tmp = local_time(t, &tm2);
-    if (tmp == nullptr || tmp->tm_year != tm.tm_year ||
-        tmp->tm_mon != tm.tm_mon || tmp->tm_mday != tm.tm_mday ||
-        tmp->tm_hour != tm.tm_hour || tmp->tm_min != tm.tm_min ||
-        tmp->tm_sec != tm.tm_sec) {
+    if (tmp == nullptr || tmp->tm_year != tm->tm_year ||
+        tmp->tm_mon != tm->tm_mon || tmp->tm_mday != tm->tm_mday ||
+        tmp->tm_hour != tm->tm_hour || tmp->tm_min != tm->tm_min ||
+        tmp->tm_sec != tm->tm_sec) {
       // A true error (not just one second before the epoch).
       return false;
     }
   }
-  *off = static_cast<int>(tm_gmtoff(tm));
   return true;
 }
 
 // Find the least time_t in [lo:hi] where local time matches offset, given:
 // (1) lo doesn't match, (2) hi does, and (3) there is only one transition.
-std::time_t find_trans(std::time_t lo, std::time_t hi, int offset) {
+std::time_t find_trans(std::time_t lo, std::time_t hi, tm_gmtoff_t offset) {
   std::tm tm;
   while (lo + 1 != hi) {
     const std::time_t mid = lo + (hi - lo) / 2;
@@ -177,8 +193,9 @@
 
 }  // namespace
 
-TimeZoneLibC::TimeZoneLibC(const std::string& name)
-    : local_(name == "localtime") {}
+std::unique_ptr<TimeZoneLibC> TimeZoneLibC::Make(const std::string& name) {
+  return std::unique_ptr<TimeZoneLibC>(new TimeZoneLibC(name));
+}
 
 time_zone::absolute_lookup TimeZoneLibC::BreakTime(
     const time_point<seconds>& tp) const {
@@ -248,33 +265,37 @@
   // We probe with "is_dst" values of 0 and 1 to try to distinguish unique
   // civil seconds from skipped or repeated ones.  This is not always possible
   // however, as the "dst" flag does not change over some offset transitions.
-  // We are also subject to the vagaries of mktime() implementations.
+  // We are also subject to the vagaries of mktime() implementations. For
+  // example, some implementations treat "tm_isdst" as a demand (useless),
+  // and some as a disambiguator (useful).
   std::time_t t0, t1;
-  int offset0, offset1;
-  if (make_time(cs, 0, &t0, &offset0) && make_time(cs, 1, &t1, &offset1)) {
-    if (t0 == t1) {
+  std::tm tm0, tm1;
+  if (make_time(cs, 0, &t0, &tm0) && make_time(cs, 1, &t1, &tm1)) {
+    if (tm0.tm_isdst == tm1.tm_isdst) {
       // The civil time was singular (pre == trans == post).
-      const time_point<seconds> tp = FromUnixSeconds(t0);
+      const time_point<seconds> tp = FromUnixSeconds(tm0.tm_isdst ? t1 : t0);
       return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
     }
 
-    if (t0 > t1) {
+    tm_gmtoff_t offset = tm_gmtoff(tm0);
+    if (t0 < t1) {  // negative DST
       std::swap(t0, t1);
-      std::swap(offset0, offset1);
+      offset = tm_gmtoff(tm1);
     }
-    const std::time_t tt = find_trans(t0, t1, offset1);
+
+    const std::time_t tt = find_trans(t1, t0, offset);
     const time_point<seconds> trans = FromUnixSeconds(tt);
 
-    if (offset0 < offset1) {
+    if (tm0.tm_isdst) {
       // The civil time did not exist (pre >= trans > post).
-      const time_point<seconds> pre = FromUnixSeconds(t1);
-      const time_point<seconds> post = FromUnixSeconds(t0);
+      const time_point<seconds> pre = FromUnixSeconds(t0);
+      const time_point<seconds> post = FromUnixSeconds(t1);
       return {time_zone::civil_lookup::SKIPPED, pre, trans, post};
     }
 
     // The civil time was ambiguous (pre < trans <= post).
-    const time_point<seconds> pre = FromUnixSeconds(t0);
-    const time_point<seconds> post = FromUnixSeconds(t1);
+    const time_point<seconds> pre = FromUnixSeconds(t1);
+    const time_point<seconds> post = FromUnixSeconds(t0);
     return {time_zone::civil_lookup::REPEATED, pre, trans, post};
   }
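The probing strategy described in the comment above can be demonstrated in isolation: ask mktime() about the same civil second with both is_dst hints and compare the normalized tm_isdst results. A minimal standalone sketch (behavior depends on the process's local zone; 2024-03-10 02:30 falls in the US spring-forward gap) follows:

// Sketch only: probe one civil time with is_dst hints of 0 and 1, as the
// MakeTime() change above does. Equal tm_isdst results after mktime()
// indicate a unique civil time; differing results indicate it was
// skipped or repeated across a transition.
#include <cstdio>
#include <ctime>

int main() {
  std::tm tm0 = {};
  tm0.tm_year = 2024 - 1900;
  tm0.tm_mon = 3 - 1;
  tm0.tm_mday = 10;
  tm0.tm_hour = 2;
  tm0.tm_min = 30;
  std::tm tm1 = tm0;
  tm0.tm_isdst = 0;
  tm1.tm_isdst = 1;
  const std::time_t t0 = std::mktime(&tm0);
  const std::time_t t1 = std::mktime(&tm1);
  if (t0 == std::time_t{-1} || t1 == std::time_t{-1}) {
    std::puts("mktime() could not represent this civil time");
  } else if (tm0.tm_isdst == tm1.tm_isdst) {
    std::puts("unique civil time");
  } else {
    std::puts("skipped or repeated civil time (transition nearby)");
  }
  return 0;
}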
 
@@ -303,6 +324,9 @@
   return local_ ? "localtime" : "UTC";
 }
 
+TimeZoneLibC::TimeZoneLibC(const std::string& name)
+    : local_(name == "localtime") {}
+
 }  // namespace cctz
 }  // namespace time_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h b/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h
index 1da9039..ae21073 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h
@@ -15,6 +15,7 @@
 #ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
 #define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
 
+#include <memory>
 #include <string>
 
 #include "absl/base/config.h"
@@ -27,10 +28,10 @@
 
 // A time zone backed by gmtime_r(3), localtime_r(3), and mktime(3),
 // and which therefore only supports UTC and the local time zone.
-// TODO: Add support for fixed offsets from UTC.
 class TimeZoneLibC : public TimeZoneIf {
  public:
-  explicit TimeZoneLibC(const std::string& name);
+  // Factory.
+  static std::unique_ptr<TimeZoneLibC> Make(const std::string& name);
 
   // TimeZoneIf implementations.
   time_zone::absolute_lookup BreakTime(
@@ -44,6 +45,10 @@
   std::string Description() const override;
 
  private:
+  explicit TimeZoneLibC(const std::string& name);
+  TimeZoneLibC(const TimeZoneLibC&) = delete;
+  TimeZoneLibC& operator=(const TimeZoneLibC&) = delete;
+
   const bool local_;  // localtime or UTC
 };
 
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
index efdea64..d22691b 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -28,6 +28,31 @@
 #include <vector>
 #endif
 
+#if defined(__Fuchsia__)
+#include <fuchsia/intl/cpp/fidl.h>
+#include <lib/async-loop/cpp/loop.h>
+#include <lib/fdio/directory.h>
+#include <zircon/types.h>
+#endif
+
+#if defined(_WIN32)
+#include <sdkddkver.h>
+// Include only when the SDK is for Windows 10 (and later), and the binary is
+// targeted for Windows XP and later.
+// Note: The Windows SDK added windows.globalization.h file for Windows 10, but
+// MinGW did not add it until NTDDI_WIN10_NI (SDK version 10.0.22621.0).
+#if ((defined(_WIN32_WINNT_WIN10) && !defined(__MINGW32__)) ||        \
+     (defined(NTDDI_WIN10_NI) && NTDDI_VERSION >= NTDDI_WIN10_NI)) && \
+    (_WIN32_WINNT >= _WIN32_WINNT_WINXP)
+#define USE_WIN32_LOCAL_TIME_ZONE
+#include <roapi.h>
+#include <tchar.h>
+#include <wchar.h>
+#include <windows.globalization.h>
+#include <windows.h>
+#endif
+#endif
+
 #include <cstdlib>
 #include <cstring>
 #include <string>
@@ -40,8 +65,8 @@
 namespace time_internal {
 namespace cctz {
 
-#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
 namespace {
+#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
 // Android 'L' removes __system_property_get() from the NDK, however
 // it is still a hidden symbol in libc so we use dlsym() to access it.
 // See Chromium's base/sys_info_android.cc for a similar example.
@@ -65,10 +90,85 @@
   static property_get_func system_property_get = LoadSystemPropertyGet();
   return system_property_get ? system_property_get(name, value) : -1;
 }
-
-}  // namespace
 #endif
 
+#if defined(USE_WIN32_LOCAL_TIME_ZONE)
+// Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
+// local time zone. Returns an empty string in case of an error.
+std::string win32_local_time_zone(const HMODULE combase) {
+  std::string result;
+  const auto ro_activate_instance =
+      reinterpret_cast<decltype(&RoActivateInstance)>(
+          GetProcAddress(combase, "RoActivateInstance"));
+  if (!ro_activate_instance) {
+    return result;
+  }
+  const auto windows_create_string_reference =
+      reinterpret_cast<decltype(&WindowsCreateStringReference)>(
+          GetProcAddress(combase, "WindowsCreateStringReference"));
+  if (!windows_create_string_reference) {
+    return result;
+  }
+  const auto windows_delete_string =
+      reinterpret_cast<decltype(&WindowsDeleteString)>(
+          GetProcAddress(combase, "WindowsDeleteString"));
+  if (!windows_delete_string) {
+    return result;
+  }
+  const auto windows_get_string_raw_buffer =
+      reinterpret_cast<decltype(&WindowsGetStringRawBuffer)>(
+          GetProcAddress(combase, "WindowsGetStringRawBuffer"));
+  if (!windows_get_string_raw_buffer) {
+    return result;
+  }
+
+  // The string returned by WindowsCreateStringReference doesn't need to be
+  // deleted.
+  HSTRING calendar_class_id;
+  HSTRING_HEADER calendar_class_id_header;
+  HRESULT hr = windows_create_string_reference(
+      RuntimeClass_Windows_Globalization_Calendar,
+      sizeof(RuntimeClass_Windows_Globalization_Calendar) / sizeof(wchar_t) - 1,
+      &calendar_class_id_header, &calendar_class_id);
+  if (FAILED(hr)) {
+    return result;
+  }
+
+  IInspectable* calendar;
+  hr = ro_activate_instance(calendar_class_id, &calendar);
+  if (FAILED(hr)) {
+    return result;
+  }
+
+  ABI::Windows::Globalization::ITimeZoneOnCalendar* time_zone;
+  hr = calendar->QueryInterface(IID_PPV_ARGS(&time_zone));
+  if (FAILED(hr)) {
+    calendar->Release();
+    return result;
+  }
+
+  HSTRING tz_hstr;
+  hr = time_zone->GetTimeZone(&tz_hstr);
+  if (SUCCEEDED(hr)) {
+    UINT32 wlen;
+    const PCWSTR tz_wstr = windows_get_string_raw_buffer(tz_hstr, &wlen);
+    if (tz_wstr) {
+      const int size =
+          WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
+                              nullptr, 0, nullptr, nullptr);
+      result.resize(static_cast<size_t>(size));
+      WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
+                          &result[0], size, nullptr, nullptr);
+    }
+    windows_delete_string(tz_hstr);
+  }
+  time_zone->Release();
+  calendar->Release();
+  return result;
+}
+#endif
+}  // namespace
+
 std::string time_zone::name() const { return effective_impl().Name(); }
 
 time_zone::absolute_lookup time_zone::lookup(
@@ -133,13 +233,89 @@
   if (CFStringRef tz_name = CFTimeZoneGetName(tz_default)) {
     CFStringEncoding encoding = kCFStringEncodingUTF8;
     CFIndex length = CFStringGetLength(tz_name);
-    buffer.resize(CFStringGetMaximumSizeForEncoding(length, encoding) + 1);
-    if (CFStringGetCString(tz_name, &buffer[0], buffer.size(), encoding)) {
+    CFIndex max_size = CFStringGetMaximumSizeForEncoding(length, encoding) + 1;
+    buffer.resize(static_cast<size_t>(max_size));
+    if (CFStringGetCString(tz_name, &buffer[0], max_size, encoding)) {
       zone = &buffer[0];
     }
   }
   CFRelease(tz_default);
 #endif
+#if defined(__Fuchsia__)
+  std::string primary_tz;
+  [&]() {
+    // Note: We can't use the synchronous FIDL API here because it doesn't
+    // allow timeouts; if the FIDL call failed, local_time_zone() would never
+    // return.
+
+    const zx::duration kTimeout = zx::msec(500);
+
+    // Don't attach to the thread because otherwise the thread's dispatcher
+    // would be set to null when the loop is destroyed, causing any other FIDL
+    // code running on the same thread to crash.
+    async::Loop loop(&kAsyncLoopConfigNeverAttachToThread);
+
+    fuchsia::intl::PropertyProviderHandle handle;
+    zx_status_t status = fdio_service_connect_by_name(
+        fuchsia::intl::PropertyProvider::Name_,
+        handle.NewRequest().TakeChannel().release());
+    if (status != ZX_OK) {
+      return;
+    }
+
+    fuchsia::intl::PropertyProviderPtr intl_provider;
+    status = intl_provider.Bind(std::move(handle), loop.dispatcher());
+    if (status != ZX_OK) {
+      return;
+    }
+
+    intl_provider->GetProfile(
+        [&loop, &primary_tz](fuchsia::intl::Profile profile) {
+          if (!profile.time_zones().empty()) {
+            primary_tz = profile.time_zones()[0].id;
+          }
+          loop.Quit();
+        });
+    loop.Run(zx::deadline_after(kTimeout));
+  }();
+
+  if (!primary_tz.empty()) {
+    zone = primary_tz.c_str();
+  }
+#endif
+#if defined(USE_WIN32_LOCAL_TIME_ZONE)
+  // Use the WinRT Calendar class to get the local time zone. This feature is
+  // available on Windows 10 and later. The library is dynamically linked to
+  // maintain binary compatibility with Windows XP - Windows 7. On Windows 8,
+// the combase.dll API functions are available but the RoActivateInstance
+  // call will fail for the Calendar class.
+  std::string winrt_tz;
+  const HMODULE combase =
+      LoadLibraryEx(_T("combase.dll"), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+  if (combase) {
+    const auto ro_initialize = reinterpret_cast<decltype(&::RoInitialize)>(
+        GetProcAddress(combase, "RoInitialize"));
+    const auto ro_uninitialize = reinterpret_cast<decltype(&::RoUninitialize)>(
+        GetProcAddress(combase, "RoUninitialize"));
+    if (ro_initialize && ro_uninitialize) {
+      const HRESULT hr = ro_initialize(RO_INIT_MULTITHREADED);
+      // RPC_E_CHANGED_MODE means that a previous RoInitialize call specified
+      // a different concurrency model. The WinRT runtime is initialized and
+      // should work for our purpose here, but we should *not* call
+      // RoUninitialize because that RoInitialize call reported a failure.
+      if (SUCCEEDED(hr) || hr == RPC_E_CHANGED_MODE) {
+        winrt_tz = win32_local_time_zone(combase);
+        if (SUCCEEDED(hr)) {
+          ro_uninitialize();
+        }
+      }
+    }
+    FreeLibrary(combase);
+  }
+  if (!winrt_tz.empty()) {
+    zone = winrt_tz.c_str();
+  }
+#endif
 
   // Allow ${TZ} to override to default zone.
   char* tz_env = nullptr;
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc b/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc
index 9a1a8d6..4884c32 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc
@@ -21,10 +21,14 @@
 #include <thread>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "absl/base/config.h"
-#include "absl/time/internal/cctz/include/cctz/civil_time.h"
 #include "absl/time/internal/cctz/include/cctz/time_zone.h"
+#if defined(__linux__)
+#include <features.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/time/internal/cctz/include/cctz/civil_time.h"
 
 namespace chrono = std::chrono;
 
@@ -131,6 +135,7 @@
                                       "America/Cayman",
                                       "America/Chicago",
                                       "America/Chihuahua",
+                                      "America/Ciudad_Juarez",
                                       "America/Coral_Harbour",
                                       "America/Cordoba",
                                       "America/Costa_Rica",
@@ -485,6 +490,7 @@
                                       "Europe/Kaliningrad",
                                       "Europe/Kiev",
                                       "Europe/Kirov",
+                                      "Europe/Kyiv",
                                       "Europe/Lisbon",
                                       "Europe/Ljubljana",
                                       "Europe/London",
@@ -524,6 +530,7 @@
                                       "Europe/Zagreb",
                                       "Europe/Zaporozhye",
                                       "Europe/Zurich",
+                                      "Factory",
                                       "GB",
                                       "GB-Eire",
                                       "GMT",
@@ -579,6 +586,7 @@
                                       "Pacific/Guam",
                                       "Pacific/Honolulu",
                                       "Pacific/Johnston",
+                                      "Pacific/Kanton",
                                       "Pacific/Kiritimati",
                                       "Pacific/Kosrae",
                                       "Pacific/Kwajalein",
@@ -717,6 +725,22 @@
 }
 #endif
 
+TEST(TimeZone, UTC) {
+  const time_zone utc = utc_time_zone();
+
+  time_zone loaded_utc;
+  EXPECT_TRUE(load_time_zone("UTC", &loaded_utc));
+  EXPECT_EQ(loaded_utc, utc);
+
+  time_zone loaded_utc0;
+  EXPECT_TRUE(load_time_zone("UTC0", &loaded_utc0));
+  EXPECT_EQ(loaded_utc0, utc);
+
+  time_zone loaded_bad;
+  EXPECT_FALSE(load_time_zone("Invalid/TimeZone", &loaded_bad));
+  EXPECT_EQ(loaded_bad, utc);
+}
+
 TEST(TimeZone, NamedTimeZones) {
   const time_zone utc = utc_time_zone();
   EXPECT_EQ("UTC", utc.name());
@@ -892,19 +916,19 @@
   const time_zone utc = utc_time_zone();
   const time_point<chrono::nanoseconds> tp_ns =
       convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
-  EXPECT_EQ("04:05", format("%M:%E*S", tp_ns, utc));
+  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_ns, utc));
   const time_point<chrono::microseconds> tp_us =
       convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
-  EXPECT_EQ("04:05", format("%M:%E*S", tp_us, utc));
+  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_us, utc));
   const time_point<chrono::milliseconds> tp_ms =
       convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
-  EXPECT_EQ("04:05", format("%M:%E*S", tp_ms, utc));
+  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_ms, utc));
   const time_point<chrono::seconds> tp_s =
       convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
-  EXPECT_EQ("04:05", format("%M:%E*S", tp_s, utc));
+  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_s, utc));
   const time_point<absl::time_internal::cctz::seconds> tp_s64 =
       convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
-  EXPECT_EQ("04:05", format("%M:%E*S", tp_s64, utc));
+  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_s64, utc));
 
   // These next two require chrono::time_point_cast because the conversion
   // from a resolution of seconds (the return value of convert()) to a
@@ -912,10 +936,10 @@
   const time_point<chrono::minutes> tp_m =
       chrono::time_point_cast<chrono::minutes>(
           convert(civil_second(2015, 1, 2, 3, 4, 5), utc));
-  EXPECT_EQ("04:00", format("%M:%E*S", tp_m, utc));
+  EXPECT_EQ("04:00", absl::time_internal::cctz::format("%M:%E*S", tp_m, utc));
   const time_point<chrono::hours> tp_h = chrono::time_point_cast<chrono::hours>(
       convert(civil_second(2015, 1, 2, 3, 4, 5), utc));
-  EXPECT_EQ("00:00", format("%M:%E*S", tp_h, utc));
+  EXPECT_EQ("00:00", absl::time_internal::cctz::format("%M:%E*S", tp_h, utc));
 }
 
 TEST(MakeTime, Normalization) {
@@ -941,9 +965,11 @@
 
   // Approach the maximal time_point<cctz::seconds> value from below.
   tp = convert(civil_second(292277026596, 12, 4, 15, 30, 6), utc);
-  EXPECT_EQ("292277026596-12-04T15:30:06+00:00", format(RFC3339, tp, utc));
+  EXPECT_EQ("292277026596-12-04T15:30:06+00:00",
+            absl::time_internal::cctz::format(RFC3339, tp, utc));
   tp = convert(civil_second(292277026596, 12, 4, 15, 30, 7), utc);
-  EXPECT_EQ("292277026596-12-04T15:30:07+00:00", format(RFC3339, tp, utc));
+  EXPECT_EQ("292277026596-12-04T15:30:07+00:00",
+            absl::time_internal::cctz::format(RFC3339, tp, utc));
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
   tp = convert(civil_second(292277026596, 12, 4, 15, 30, 8), utc);
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
@@ -952,7 +978,8 @@
 
   // Checks that we can also get the maximal value for a far-east zone.
   tp = convert(civil_second(292277026596, 12, 5, 5, 30, 7), east);
-  EXPECT_EQ("292277026596-12-05T05:30:07+14:00", format(RFC3339, tp, east));
+  EXPECT_EQ("292277026596-12-05T05:30:07+14:00",
+            absl::time_internal::cctz::format(RFC3339, tp, east));
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
   tp = convert(civil_second(292277026596, 12, 5, 5, 30, 8), east);
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
@@ -961,7 +988,8 @@
 
   // Checks that we can also get the maximal value for a far-west zone.
   tp = convert(civil_second(292277026596, 12, 4, 1, 30, 7), west);
-  EXPECT_EQ("292277026596-12-04T01:30:07-14:00", format(RFC3339, tp, west));
+  EXPECT_EQ("292277026596-12-04T01:30:07-14:00",
+            absl::time_internal::cctz::format(RFC3339, tp, west));
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
   tp = convert(civil_second(292277026596, 12, 4, 7, 30, 8), west);
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
@@ -970,9 +998,11 @@
 
   // Approach the minimal time_point<cctz::seconds> value from above.
   tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 53), utc);
-  EXPECT_EQ("-292277022657-01-27T08:29:53+00:00", format(RFC3339, tp, utc));
+  EXPECT_EQ("-292277022657-01-27T08:29:53+00:00",
+            absl::time_internal::cctz::format(RFC3339, tp, utc));
   tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 52), utc);
-  EXPECT_EQ("-292277022657-01-27T08:29:52+00:00", format(RFC3339, tp, utc));
+  EXPECT_EQ("-292277022657-01-27T08:29:52+00:00",
+            absl::time_internal::cctz::format(RFC3339, tp, utc));
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
   tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 51), utc);
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
@@ -981,7 +1011,8 @@
 
   // Checks that we can also get the minimal value for a far-east zone.
   tp = convert(civil_second(-292277022657, 1, 27, 22, 29, 52), east);
-  EXPECT_EQ("-292277022657-01-27T22:29:52+14:00", format(RFC3339, tp, east));
+  EXPECT_EQ("-292277022657-01-27T22:29:52+14:00",
+            absl::time_internal::cctz::format(RFC3339, tp, east));
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
   tp = convert(civil_second(-292277022657, 1, 27, 22, 29, 51), east);
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
@@ -990,7 +1021,8 @@
 
   // Checks that we can also get the minimal value for a far-west zone.
   tp = convert(civil_second(-292277022657, 1, 26, 18, 29, 52), west);
-  EXPECT_EQ("-292277022657-01-26T18:29:52-14:00", format(RFC3339, tp, west));
+  EXPECT_EQ("-292277022657-01-26T18:29:52-14:00",
+            absl::time_internal::cctz::format(RFC3339, tp, west));
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
   tp = convert(civil_second(-292277022657, 1, 26, 18, 29, 51), west);
   EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
@@ -1007,14 +1039,20 @@
     const time_zone cut = LoadZone("libc:UTC");
     const year_t max_tm_year = year_t{std::numeric_limits<int>::max()} + 1900;
     tp = convert(civil_second(max_tm_year, 12, 31, 23, 59, 59), cut);
-#if defined(__FreeBSD__) || defined(__OpenBSD__)
-    // The BSD gmtime_r() fails on extreme positive tm_year values.
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__EMSCRIPTEN__)
+    // Some gmtime_r() impls fail on extreme positive values.
 #else
-    EXPECT_EQ("2147485547-12-31T23:59:59+00:00", format(RFC3339, tp, cut));
+    EXPECT_EQ("2147485547-12-31T23:59:59+00:00",
+              absl::time_internal::cctz::format(RFC3339, tp, cut));
 #endif
     const year_t min_tm_year = year_t{std::numeric_limits<int>::min()} + 1900;
     tp = convert(civil_second(min_tm_year, 1, 1, 0, 0, 0), cut);
-    EXPECT_EQ("-2147481748-01-01T00:00:00+00:00", format(RFC3339, tp, cut));
+#if defined(__Fuchsia__) || defined(__EMSCRIPTEN__)
+    // Some gmtime_r() impls fail on extreme negative values (fxbug.dev/78527).
+#else
+    EXPECT_EQ("-2147481748-01-01T00:00:00+00:00",
+              absl::time_internal::cctz::format(RFC3339, tp, cut));
+#endif
 #endif
   }
 }
@@ -1026,7 +1064,7 @@
   //  1) we know how to change the time zone used by localtime()/mktime(),
   //  2) cctz and localtime()/mktime() will use similar-enough tzdata, and
   //  3) we have some idea about how mktime() behaves during transitions.
-#if defined(__linux__) && !defined(__ANDROID__)
+#if defined(__linux__) && defined(__GLIBC__) && !defined(__ANDROID__)
   const char* const ep = getenv("TZ");
   std::string tz_name = (ep != nullptr) ? ep : "";
   for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
@@ -1039,7 +1077,7 @@
          tp = zi.lookup(transition.to).trans) {
       const auto fcl = zi.lookup(transition.from);
       const auto tcl = zi.lookup(transition.to);
-      civil_second cs;  // compare cs in zi and lc
+      civil_second cs, us;  // compare cs and us in zi and lc
       if (fcl.kind == time_zone::civil_lookup::UNIQUE) {
         if (tcl.kind == time_zone::civil_lookup::UNIQUE) {
           // Both unique; must be an is_dst or abbr change.
@@ -1055,12 +1093,14 @@
         }
         ASSERT_EQ(time_zone::civil_lookup::REPEATED, tcl.kind);
         cs = transition.to;
+        us = transition.from;
       } else {
         ASSERT_EQ(time_zone::civil_lookup::UNIQUE, tcl.kind);
         ASSERT_EQ(time_zone::civil_lookup::SKIPPED, fcl.kind);
         cs = transition.from;
+        us = transition.to;
       }
-      if (cs.year() > 2037) break;  // limit test time (and to 32-bit time_t)
+      if (us.year() > 2037) break;  // limit test time (and to 32-bit time_t)
       const auto cl_zi = zi.lookup(cs);
       if (zi.lookup(cl_zi.pre).is_dst == zi.lookup(cl_zi.post).is_dst) {
         // The "libc" implementation cannot correctly classify transitions
@@ -1092,6 +1132,13 @@
       EXPECT_EQ(cl_zi.pre, cl_lc.pre);
       EXPECT_EQ(cl_zi.trans, cl_lc.trans);
       EXPECT_EQ(cl_zi.post, cl_lc.post);
+      const auto ucl_zi = zi.lookup(us);
+      const auto ucl_lc = lc.lookup(us);
+      SCOPED_TRACE(testing::Message() << "For " << us << " in " << *np);
+      EXPECT_EQ(ucl_zi.kind, ucl_lc.kind);
+      EXPECT_EQ(ucl_zi.pre, ucl_lc.pre);
+      EXPECT_EQ(ucl_zi.trans, ucl_lc.trans);
+      EXPECT_EQ(ucl_zi.post, ucl_lc.post);
     }
   }
   if (ep == nullptr) {
@@ -1165,6 +1212,45 @@
   // We have a transition but we don't know which one.
 }
 
+TEST(NextTransition, Scan) {
+  for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
+    SCOPED_TRACE(testing::Message() << "In " << *np);
+    time_zone tz;
+    // EXPECT_TRUE(load_time_zone(*np, &tz));
+    if (!load_time_zone(*np, &tz)) {
+      continue;  // tolerate kTimeZoneNames/zoneinfo skew
+    }
+
+    auto tp = time_point<absl::time_internal::cctz::seconds>::min();
+    time_zone::civil_transition trans;
+    while (tz.next_transition(tp, &trans)) {
+      time_zone::civil_lookup from_cl = tz.lookup(trans.from);
+      EXPECT_NE(from_cl.kind, time_zone::civil_lookup::REPEATED);
+      time_zone::civil_lookup to_cl = tz.lookup(trans.to);
+      EXPECT_NE(to_cl.kind, time_zone::civil_lookup::SKIPPED);
+
+      auto trans_tp = to_cl.trans;
+      time_zone::absolute_lookup trans_al = tz.lookup(trans_tp);
+      EXPECT_EQ(trans_al.cs, trans.to);
+      auto pre_trans_tp = trans_tp - absl::time_internal::cctz::seconds(1);
+      time_zone::absolute_lookup pre_trans_al = tz.lookup(pre_trans_tp);
+      EXPECT_EQ(pre_trans_al.cs + 1, trans.from);
+
+      auto offset_delta = trans_al.offset - pre_trans_al.offset;
+      EXPECT_EQ(offset_delta, trans.to - trans.from);
+      if (offset_delta == 0) {
+        // This "transition" is only an is_dst or abbr change.
+        EXPECT_EQ(to_cl.kind, time_zone::civil_lookup::UNIQUE);
+        if (trans_al.is_dst == pre_trans_al.is_dst) {
+          EXPECT_STRNE(trans_al.abbr, pre_trans_al.abbr);
+        }
+      }
+
+      tp = trans_tp;  // continue scan from transition
+    }
+  }
+}
+
 TEST(TimeZoneEdgeCase, AmericaNewYork) {
   const time_zone tz = LoadZone("America/New_York");
 
diff --git a/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h b/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h
index 0cf2905..7fd2b9e 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h
+++ b/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h
@@ -104,7 +104,7 @@
 
 // The entirety of a POSIX-string specified time-zone rule. The standard
 // abbreviation and offset are always given. If the time zone includes
-// daylight saving, then the daylight abbrevation is non-empty and the
+// daylight saving, then the daylight abbreviation is non-empty and the
 // remaining fields are also valid. Note that the start/end transitions
 // are not ordered---in the southern hemisphere the transition to end
 // daylight time occurs first in any particular year.
diff --git a/abseil-cpp/absl/time/internal/cctz/src/tzfile.h b/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
index 659f84c..9613055 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
+++ b/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
@@ -43,7 +43,7 @@
 
 struct tzhead {
   char tzh_magic[4];      /* TZ_MAGIC */
-  char tzh_version[1];    /* '\0' or '2' or '3' as of 2013 */
+  char tzh_version[1];    /* '\0' or '2'-'4' as of 2021 */
   char tzh_reserved[15];  /* reserved; must be zero */
   char tzh_ttisutcnt[4];  /* coded number of trans. time flags */
   char tzh_ttisstdcnt[4]; /* coded number of trans. time flags */
@@ -102,21 +102,25 @@
 */
 
 #ifndef TZ_MAX_TIMES
+/* This must be at least 242 for Europe/London with 'zic -b fat'.  */
 #define TZ_MAX_TIMES 2000
 #endif /* !defined TZ_MAX_TIMES */
 
 #ifndef TZ_MAX_TYPES
-/* This must be at least 17 for Europe/Samara and Europe/Vilnius.  */
+/* This must be at least 18 for Europe/Vilnius with 'zic -b fat'.  */
 #define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
-#endif /* !defined TZ_MAX_TYPES */
+#endif                   /* !defined TZ_MAX_TYPES */
 
 #ifndef TZ_MAX_CHARS
+/* This must be at least 40 for America/Anchorage.  */
 #define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
-/* (limited by what unsigned chars can hold) */
-#endif /* !defined TZ_MAX_CHARS */
+                        /* (limited by what unsigned chars can hold) */
+#endif                  /* !defined TZ_MAX_CHARS */
 
 #ifndef TZ_MAX_LEAPS
+/* This must be at least 27 for leap seconds from 1972 through mid-2023.
+   There's a plan to discontinue leap seconds by 2035.  */
 #define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
-#endif /* !defined TZ_MAX_LEAPS */
+#endif                  /* !defined TZ_MAX_LEAPS */
 
 #endif /* !defined TZFILE_H */
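The version byte documented above can be inspected directly: a compiled zoneinfo file starts with the 4-byte "TZif" magic followed by the tzh_version character. A short standalone check (illustrative only, not part of the patch; the path is just an example) might be:

// Sketch only: report the TZif format version of a compiled zone,
// per the tzh_magic/tzh_version fields described above.
#include <cstdio>
#include <cstring>

int main() {
  std::FILE* fp = std::fopen("/usr/share/zoneinfo/Europe/London", "rb");
  if (fp == nullptr) return 1;
  char head[5] = {};
  const bool ok = std::fread(head, 1, sizeof(head), fp) == sizeof(head) &&
                  std::memcmp(head, "TZif", 4) == 0;
  if (ok) {
    // A NUL version byte denotes the original (version 1) format.
    std::printf("TZif version: %c\n", head[4] == '\0' ? '1' : head[4]);
  }
  std::fclose(fp);
  return ok ? 0 : 1;
}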
diff --git a/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc b/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc
index 7209533..9bc8197 100644
--- a/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc
+++ b/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc
@@ -58,14 +58,15 @@
 // MinGW is GCC on Windows, so while it asserts __has_attribute(weak), the
 // Windows linker cannot handle that. Nor does the MinGW compiler know how to
 // pass "#pragma comment(linker, ...)" to the Windows linker.
-#if (__has_attribute(weak) || defined(__GNUC__)) && !defined(__MINGW32__)
+#if (__has_attribute(weak) || defined(__GNUC__)) && !defined(__MINGW32__) && \
+    !defined(__CYGWIN__)
 ZoneInfoSourceFactory zone_info_source_factory __attribute__((weak)) =
     DefaultFactory;
 #elif defined(_MSC_VER) && !defined(__MINGW32__) && !defined(_LIBCPP_VERSION)
 extern ZoneInfoSourceFactory zone_info_source_factory;
 extern ZoneInfoSourceFactory default_factory;
 ZoneInfoSourceFactory default_factory = DefaultFactory;
-#if defined(_M_IX86)
+#if defined(_M_IX86) || defined(_M_ARM)
 #pragma comment(                                                                                                         \
     linker,                                                                                                              \
     "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS                    \
@@ -83,8 +84,7 @@
     "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS                                   \
     "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
     "@@ZA")
-#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM) || \
-    defined(_M_ARM64)
+#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM64)
 #pragma comment(                                                                                                          \
     linker,                                                                                                               \
     "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS                     \
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo b/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo
index 95fb4a9..67e9c40 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo
@@ -21,6 +21,7 @@
                 REDO=posix_only \
                 LOCALTIME=Factory \
                 TZDATA_TEXT= \
+                PACKRATDATA=backzone PACKRATLIST=zone.tab \
                 ZONETABLES=zone1970.tab
     tar --create --dereference --hard-dereference --file tzfile.tar \
         --directory=tz tzfile.h
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/version b/abseil-cpp/absl/time/internal/cctz/testdata/version
index 7f680ee..7daa77e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/version
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/version
@@ -1 +1 @@
-2020a
+2023c
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan
index 28b32ab..8906e88 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra
index 697b993..c39ae38 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa
index 9a2918f..4e8951f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers
index ae04342..56a4dd2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara
index 9a2918f..194e986 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera
index 9a2918f..194e986 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako
index 28b32ab..3cb875f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui
index 0c80137..0021d2d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul
index 28b32ab..b235744 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau
index 82ea5aa..0da1d1e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre
index 52753c0..d7bca1e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville
index 0c80137..57a723b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura
index 52753c0..90b8679 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
index d3f8196..1e6d48d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
index d39016b..240ebb2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta
index 850c8f0..a461dce 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry
index 28b32ab..c22c328 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar
index 28b32ab..1f04c58 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam
index 9a2918f..b37c2b4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti
index 9a2918f..e9bbc7a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala
index 0c80137..65001f6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
index 066fbed..909c5f9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown
index 28b32ab..8431ed6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone
index 52753c0..e442098 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare
index 52753c0..c4a502c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg
index b1c425d..bada063 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba
index 625b1ac..0aba9ff 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala
index 9a2918f..3021d84 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum
index 8ee8cb9..3f8e44b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali
index 52753c0..b2eff57 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa
index 0c80137..8d6f2a8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos
index 0c80137..3d7a71b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville
index 0c80137..1544cf5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome
index 28b32ab..8e2b700 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda
index 0c80137..226d87f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi
index 52753c0..14e1ee1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka
index 52753c0..18fcb16 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo
index 0c80137..8a3f4e9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo
index 52753c0..651e5cf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru
index b1c425d..820d852 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane
index b1c425d..d57a53c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu
index 9a2918f..25a5973 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia
index 6d68850..8377809 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi
index 9a2918f..5f4ebcb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena
index a968845..ecbc096 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey
index 0c80137..bdf222a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott
index 28b32ab..faa6f32 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou
index 28b32ab..f4e55ae 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo
index 0c80137..a869ec3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome
index 59f3759..425ad3f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu
index 28b32ab..3cb875f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli
index 07b393b..e0c8997 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis
index 427fa56..ca324cb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek
index abecd13..0edc52b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Adak b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Adak
index 4323649..b1497bd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Adak
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Adak
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage
index 9bbb2fd..cdf0572 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla
index 697cf5b..d057735 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua
index 697cf5b..7ef2cc9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina
index 49381b4..f66c9f7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires
index 260f86a..d6f999b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca
index 0ae222a..1dcc8d8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia
index 0ae222a..1dcc8d8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba
index da4c23a..35a52e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy
index 604b856..b275f27 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja
index 2218e36..23fca12 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza
index f9e677f..691c569 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos
index c36587e..991d1fa 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta
index 0e797f2..58863e0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan
index 2698495..7eba33c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis
index fe50f62..0a81cbd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman
index c954000..10556d5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia
index 3643628..e031750 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba
index f7ab6ef..6158ca5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion
index 2f3bbda..6225036 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan
index 629ed42..c828715 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atka b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atka
index 4323649..b1497bd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atka
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atka
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia
index 15808d3..7969e30 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas
index 896af3f..48faea2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados
index 9b90e30..720c986 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belem b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belem
index 60b5924..e0d7653 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belem
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belem
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belize b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belize
index 851051a..bfc19f4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belize
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belize
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon
index f9f13a1..7096b69 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista
index 978c331..fca9720 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota
index b2647d7..85b9033 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boise b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boise
index f8d54e2..72fec9e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boise
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boise
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires
index 260f86a..d6f999b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay
index f8db4b6..1092f4b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande
index 8120624..6855e4e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun
index f907f0a..640b259 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas
index eedf725..8dbe6ff 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca
index 0ae222a..1dcc8d8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne
index e5bc06f..cd49f05 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman
index 9964b9a..8be5515 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago
index a5b1617..b016880 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua
index 8ed5f93..5e0a54f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ciudad_Juarez b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ciudad_Juarez
new file mode 100644
index 0000000..f636ee6
--- /dev/null
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ciudad_Juarez
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour
index 629ed42..c828715 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba
index da4c23a..35a52e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica
index 37cb85e..08f0128 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Creston b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Creston
index ca64857..9d69a0a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Creston
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Creston
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba
index 9bea3d4..c09a875 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao
index f7ab6ef..d6ddf7d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn
index 9549adc..8718efc 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson
index 2b6c3ee..07e4c5f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek
index db9e339..761d1d9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Denver b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Denver
index 5fbe26b..09e54e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Denver
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Denver
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit
index e104faa..6eb3ac4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica
index 697cf5b..7c7cebf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton
index cd78a6f..645ee94 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe
index 39d6dae..7da4b98 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador
index e2f2230..4348411 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada
index ada6bf7..e8be26b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson
index 5a0b7f1..2a49c6c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne
index 09511cc..6b08d15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza
index be57dc2..092e40d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay
index 48412a4..f85eb34 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
index 0160308..00b57bb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay
index a3f2990..820e0dd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk
index b9bb063..9d90e74 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada
index 697cf5b..a58e63a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe
index 697cf5b..7174738 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala
index 407138c..8aa8e58 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil
index 0559a7a..381ae6c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana
index d5dab14..bcc6688 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax
index 756099a..9fa850a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Havana b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Havana
index b69ac45..e06629d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Havana
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Havana
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo
index 791a9fa..5c92e29 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis
index 09511cc..6b08d15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox
index fcd408d..b187d5f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo
index 1abf75e..a730fe6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg
index 0133548..341a023 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City
index 7bbb653..76e1f62 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay
index d236b7c..f2acf6c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes
index c818929..c255f89 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac
index 630935c..8700ed9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis
index 09511cc..6b08d15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik
index 87bb355..86639f6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit
index c8138bd..95e055c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica
index 2a9b7fd..be6b1b6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy
index 604b856..b275f27 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau
index 451f349..e347b36 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville
index 177836e..f2136d6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello
index 438e3ea..d9f54a1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN
index fcd408d..b187d5f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk
index f7ab6ef..d6ddf7d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz
index a101372..68ddaae 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lima b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lima
index 3c6529b..b643c55 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lima
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lima
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles
index 9dad4f4..aaf0778 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville
index 177836e..f2136d6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes
index f7ab6ef..d6ddf7d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio
index bc8b951..dbb8d57 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Managua b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Managua
index e0242bf..86ef76b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Managua
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Managua
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus
index 63d58f8..59c952e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot
index 697cf5b..f4fe590 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique
index 8df43dc..25c0232 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros
index 047968d..88cabcd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan
index e4a7857..97d4d36 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza
index f9e677f..691c569 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee
index 3146138..28d2c56 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida
index ea852da..e5de113 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla
index 1e94be3..9fefee3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City
index e7fb6f2..80a415c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
index b924b71..3b62585 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton
index 9df8d0f..ecb69ef 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey
index a8928c8..a5822e2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo
index 2f357bc..4b2fb3e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
index 6752c5b..fe6be8e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat
index 697cf5b..41bf898 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau
index 33cc6c6..2ef2aa8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/New_York b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/New_York
index 2f75480..2b6c2ee 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/New_York
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/New_York
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
index f6a856e..fe6be8e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nome b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nome
index 10998df..23ead1c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nome
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nome
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha
index f140726..9e74745 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah
index 246345d..becf438 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center
index 1fa0703..d03bda0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem
index 123f2ae..ecefc15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk
index 0160308..00b57bb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga
index fc4a03e..2fc74e9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Panama b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Panama
index 9964b9a..9154643 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Panama
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Panama
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung
index 3e4e0db..95e055c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo
index bc8a6ed..24f925a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix
index ac6bb0c..c2bd2f9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince
index 287f143..3e75731 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain
index 697cf5b..f4fe590 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre
index a374cb4..fb5185c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho
index 2e873a5..7f8047d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico
index a662a57..47b4dc3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
index a5a8af5..aa839ea 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River
index ea66099..7e646d1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet
index 3a70587..6d1d90d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Recife b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Recife
index d7abb16..305abcb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Recife
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Recife
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Regina b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Regina
index 20c9c84..a3f8217 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Regina
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Regina
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute
index 0a73b75..97eb8a9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco
index a374cb4..fb5185c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario
index da4c23a..35a52e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel
index ada6bf7..e8be26b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem
index c28f360..f81d144 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
index 816a042..d3fc9b8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo
index 4fe36fd..3e07850 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo
index 13ff083..a16da2c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund
index e20e9e1..6db4912 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock
index 5fbe26b..09e54e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka
index 31f7061..36681ed 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy
index 697cf5b..f4fe590 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns
index 65a5b0c..e5f2aec 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts
index 697cf5b..6170b6c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia
index 697cf5b..e265baf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas
index 697cf5b..0e62d30 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent
index 697cf5b..64cbf90 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current
index 8e9ef25..bdbb494 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa
index 2adacb2..38036a3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thule b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thule
index 6f802f1..f38dc56 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thule
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thule
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
index e504c9a..fe6be8e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana
index ada6bf7..e8be26b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
index 6752c5b..fe6be8e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola
index 697cf5b..a0a5d60 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver
index bb60cbc..c998491 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin
index 697cf5b..0e62d30 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse
index 062b58c..40baa9a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg
index ac40299..7e646d1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat
index da209f9..773feba 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
index e6afa39..645ee94 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey
index f100f47..30315cc 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis
index 916f2c2..3ec3222 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville
index a71b39c..c0cfc85 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie
index 616afd9..3fc1f23 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson
index b32e7fd..05e4c6c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo
index 6575fdc..ea1f8f8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer
index 3dd85f8..32c1941 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera
index 8b2430a..ea49c00 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole
index 6575fdc..ea1f8f8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa
index 254af7d..97d80d7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll
index 5e565da..4e31aff 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok
index 7283053..6e32907 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen
index 15a34c3..dfc5095 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden
index 2aea25f..ac57147 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
index a4b0077..3ec4fc8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman
index c9e8707..a3f9dff 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr
index 6ed8b7c..551884d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau
index e2d0f91..3a40d11 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe
index 06f0a13..62c5840 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat
index 73891af..8482167 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad
index 73891af..8482167 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau
index 8b5153e..cb2c82f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad
index f7162ed..a3ce975 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain
index 63188b2..33f7a20 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku
index a0de74b..96203d7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok
index c292ac5..ed687d2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul
index 759592a..ff976dd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut
index fb266ed..55dce57 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek
index f6e20dd..fe7832c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei
index 3dab0ab..e67b411 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta
index 0014046..00bc80a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita
index c4149c0..9d49cd3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan
index e48daa8..0a948c2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing
index 91f6f8b..d6b6698 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking
index 91f6f8b..d6b6698 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo
index 62c64d8..3eeb1b7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca
index b11c928..2813680 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus
index d9104a7..bd1624d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka
index b11c928..2813680 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili
index 30943bb..bb7be9f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai
index fc0a589..58d75bc 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe
index 82d85b8..d83fb07 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta
index 653b146..cc44179 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
index 592b632..7e83389 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin
index 91f6f8b..d6b6698 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
index ae82f9b..fcf923b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
index e2934e3..de53596 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong
index 23d0375..c80e364 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd
index 4cb800a..6e08a26 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk
index 4dcbbb7..550e2a0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul
index 508446b..c891866 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta
index 5baa3a8..c9752d2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura
index 3002c82..7c22f53 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem
index 440ef06..4c49bbf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul
index d19b9bd..660ce4c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka
index 3e80b4e..c651554 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi
index ba65c0e..e56d5af 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar
index faa14d9..69ff7f6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu
index a5d5107..3a0d330 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu
index a5d5107..3a0d330 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga
index 72bea64..aeb7332 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata
index 0014046..00bc80a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk
index 30c6f16..e0d4fcb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur
index 612b01e..b396dec 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching
index c86750c..59bc6e4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait
index 2aea25f..5c7f106 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao
index cac6506..c22f75e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau
index cac6506..c22f75e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan
index b4fcac1..16bac84 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar
index 556ba86..5990010 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila
index f4f4b04..3c3584e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat
index fc0a589..cce5e19 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia
index f7f10ab..c210d0a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk
index d983276..9378d50 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk
index e0ee5fc..65a9fa2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk
index b29b769..dc0ed42 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral
index ad1f9ca..25a63ec 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh
index c292ac5..c49800e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak
index 12ce24c..285bed2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang
index 7ad7e0b..57240cf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar
index 63188b2..7409d74 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
index 73b9d96..ff6fe61 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda
index c2fe4c1..fe4d6c6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon
index dd77395..14b2ad0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh
index 2aea25f..01c47cc 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
index e2934e3..de53596 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin
index 485459c..69f0faa 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand
index 030d47c..c43e27c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul
index 96199e7..1755147 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai
index 91f6f8b..d6b6698 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore
index 2364b21..dbbdea3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk
index 261a983..7fdee5c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei
index 24c4344..35d89d0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent
index 32a9d7d..65ee428 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi
index b608d79..166e434 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran
index 8cec5ad..824acb0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv
index 440ef06..4c49bbf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu
index fe409c7..0edc72c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu
index fe409c7..0edc72c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo
index 26f4d34..1aa066c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk
index 670e2ad..c3c307d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang
index 556ba86..5990010 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar
index 2e20cc3..6f5d3a1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator
index 2e20cc3..6f5d3a1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi
index faa14d9..69ff7f6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera
index 9e4a78f..c39331e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane
index c292ac5..659e511 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok
index 8ab253c..72a3d4e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk
index c815e99..336f932 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon
index dd77395..14b2ad0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg
index 6958d7e..a3bf7f2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan
index 250bfe0..6dd927c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores
index 56593db..e6e2616 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda
index 419c660..abc75ea 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary
index f319215..5ab3243 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde
index e2a49d2..8f7de1c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe
index 4dab7ef..9558bf7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe
index 4dab7ef..9558bf7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen
index 15a34c3..dfc5095 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira
index 5213761..cf965c3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik
index 10e0fc8..2451aca 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia
index 4466608..7fa5f46 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena
index 28b32ab..6f75068 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley
index 88077f1..1a4c8ea 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT
index 7636592..1975a3a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide
index 0b1252a..3bfbbc5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane
index 3021bdb..dc9a980 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill
index 1ac3fc8..947b509 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra
index 7636592..1975a3a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie
index f65a990..dc2ef55 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin
index 1cf5029..a6a6730 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla
index 98ae557..9080f5c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart
index 02b07ca..dc2ef55 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI
index 9e04a80..4d4ec8c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman
index eab0fb9..131d77b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe
index 9e04a80..4d4ec8c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne
index ba45733..d3f195a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW
index 7636592..1975a3a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/North b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/North
index 1cf5029..a6a6730 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/North
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/North
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth
index a876b9e..4f77182 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland
index 3021bdb..dc9a980 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/South b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/South
index 0b1252a..3bfbbc5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/South
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/South
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney
index 7636592..1975a3a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania
index 02b07ca..dc2ef55 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria
index ba45733..d3f195a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/West b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/West
index a876b9e..4f77182 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/West
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/West
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna
index 1ac3fc8..947b509 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre
index a374cb4..fb5185c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha
index f140726..9e74745 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East
index 13ff083..a16da2c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West
index 63d58f8..59c952e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CET b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CET
index 122e934..546748d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CET
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CET
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT
index ca67929..d931558 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic
index 756099a..9fa850a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central
index ac40299..7e646d1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
index 6752c5b..fe6be8e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain
index cd78a6f..645ee94 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland
index 65a5b0c..e5f2aec 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific
index bb60cbc..c998491 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan
index 20c9c84..a3f8217 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon
index 062b58c..40baa9a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
index 816a042..d3fc9b8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland
index cae3744..54dff00 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Cuba b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Cuba
index b69ac45..e06629d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Cuba
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Cuba
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EET b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EET
index cbdb71d..378919e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EET
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EET
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST
index 21ebc00..3ae9691 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT
index 9bce500..50c95e0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Egypt b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Egypt
index d3f8196..1e6d48d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Egypt
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Egypt
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire
index 1d99490..17d2b15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1
index 4dab6f9..98d5dcf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10
index c749290..ecb287e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11
index d969982..e941412 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12
index cdeec90..9c95bd0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2
index fbd2a94..6d5ce3d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3
index ee246ef..5ef7be7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4
index 5a25ff2..75f1621 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5
index c0b745f..589990a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6
index 06e777d..fcb60ca 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7
index 4e0b53a..c0427a4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8
index 714b0c5..9bdc228 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9
index 78b9daa..ca7a81f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1
index a838beb..cb45601 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10
index 68ff77d..11d988e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11
index 66af5a4..f4c5d5c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12
index 17ba505..cd397b0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13
index 5f3706c..8fad7c6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14
index 7e9f9c4..a595e60 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2
index fcef6d9..97b44a9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3
index 27973bc..4eb17ff 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4
index 1efd841..13aef80 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5
index 1f76184..83a2816 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6
index 952681e..79a983e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7
index cefc912..e136690 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8
index afb093d..bc70fe4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9
index 9265fb7..d18cedd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam
index c3ff07b..4a6fa1d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra
index 5962550..38685d4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan
index 73a4d01..aff8d82 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens
index 9f3a067..231bf9c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast
index ac02a81..323cd38 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade
index 27de456..a1bf928 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin
index 7f6d958..465546b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava
index ce8f433..fb7c145 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels
index 40d7124..3197327 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest
index 4303b90..efa689b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest
index 6b94a4f..940be46 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen
index ad6cf59..388df29 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau
index 5ee23fe..6970b14 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen
index 776be6e..45984a7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin
index 1d99490..17d2b15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar
index 117aadb..017bb2e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey
index ac02a81..d40bcaa 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki
index b4f8f9c..ff5e565 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man
index ac02a81..b0a37e7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul
index 508446b..c891866 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey
index ac02a81..9a10a2e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad
index cc99bea..0ec4756 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
index 9337c9e..4e02685 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
index a3b5320..bfac561 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv
new file mode 100644
index 0000000..4e02685
--- /dev/null
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon
index 355817b..f0c70b6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana
index 27de456..fdb9e86 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/London b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/London
index ac02a81..323cd38 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/London
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/London
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg
index c4ca733..682bcbf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid
index 16f6420..60bdf4d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta
index bf2452d..27539c2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn
index b4f8f9c..ff5e565 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk
index 453306c..30d3a67 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco
index 686ae88..f30dfc7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow
index ddb3f4e..5e6b6de 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia
index f7f10ab..c210d0a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo
index 15a34c3..dfc5095 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris
index ca85435..00a2726 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica
index 27de456..a1bf928 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague
index ce8f433..fb7c145 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga
index 8db477d..26af4c9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome
index ac4c163..639ca3b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara
index 97d5dd9..8d0c26e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino
index ac4c163..639ca3b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo
index 27de456..53db056 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov
index 8fd5f6d..2684d8f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
index 432e831..298b832 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje
index 27de456..036361c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia
index 0e4d879..eabc972 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm
index f3e0c7f..dd3eb32 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn
index b5acca3..5321bbd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane
index 0b86017..743a733 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol
index 5ee23fe..6970b14 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk
index 7b61bdc..bb842cb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
index 66ae8d6..4e02685 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz
index ad6cf59..28465d8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican
index ac4c163..639ca3b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna
index 3582bb1..75339e9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius
index 7abd63f..75b2eeb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd
index d1cfac0..0715d58 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw
index e33cf67..efe1a40 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb
index 27de456..8e13ede 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
index e42edfc..4e02685 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich
index ad6cf59..388df29 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Factory b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Factory
index 60aa2a0..b4dd773 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Factory
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Factory
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB
index ac02a81..323cd38 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire
index ac02a81..323cd38 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT+0 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT+0
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT+0
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT+0
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT-0 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT-0
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT-0
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT-0
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT0 b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT0
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT0
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT0
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Greenwich b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Greenwich
index c634746..157573b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Greenwich
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Greenwich
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/HST b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/HST
index cccd45e..160a53e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/HST
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/HST
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Hongkong b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Hongkong
index 23d0375..c80e364 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Hongkong
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Hongkong
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iceland b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iceland
index 10e0fc8..2451aca 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iceland
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iceland
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo
index 9a2918f..0bf86f0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos
index 93d6dda..8b8ce22 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas
index d18c381..766024b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos
index f8116e7..1175034 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro
index 9a2918f..640b3e8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen
index cde4cf7..8ce93e0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe
index cba7dfe..e7fccf8 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives
index 7c839cf..58a82e4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius
index 17f2616..7c11134 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte
index 9a2918f..7a009c3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion
index dfe0831..248a7c9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran
index 8cec5ad..824acb0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Israel b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Israel
index 440ef06..4c49bbf 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Israel
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Israel
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Jamaica b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Jamaica
index 2a9b7fd..be6b1b6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Jamaica
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Jamaica
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Japan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Japan
index 26f4d34..1aa066c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Japan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Japan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein
index 1a7975f..9416d52 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Libya b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Libya
index 07b393b..e0c8997 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Libya
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Libya
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MET b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MET
index 4a826bb..6f0558c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MET
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MET
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST
index c93a58e..a0953d1 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT
index 4506a6e..137867c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte
index ada6bf7..e8be26b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur
index e4a7857..97d4d36 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General
index e7fb6f2..80a415c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ
index 6575fdc..afb3929 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT
index c004109..f06065e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Navajo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Navajo
index 5fbe26b..09e54e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Navajo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Navajo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PRC b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PRC
index 91f6f8b..d6b6698 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PRC
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PRC
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT
index 99d246b..fde4833 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia
index dab1f3f..a6b835a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland
index 6575fdc..afb3929 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville
index 2892d26..7c66709 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham
index c004109..f06065e 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk
index 07c84b7..ea3fb5c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter
index cae3744..54dff00 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate
index 6015017..bf7471d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury
index f0b8252..2b6a060 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo
index e40307f..b7b3021 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji
index d39bf53..610b850 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti
index ea72863..78ab35b 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos
index 31f0921..a9403ec 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier
index e1fc3da..ddfc34f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal
index 7e9d10a..720c679 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam
index 66490d2..bf9a2d9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu
index c7cd060..40e3d49 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston
index c7cd060..40e3d49 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton
new file mode 100644
index 0000000..2b6a060
--- /dev/null
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati
index 7cae0cb..2f676d3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae
index a584aae..f5d5824 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein
index 1a7975f..9416d52 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro
index 9ef8374..9228ee0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas
index 74d6792..6ea24b7 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway
index cb56709..b25364c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru
index acec042..ae13aac 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue
index 684b010..be874e2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk
index 53c1aad..79e2a94 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea
index 931a1a3..824f814 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago
index cb56709..001289c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau
index 146b351..bc8eb7a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn
index ef91b06..8a4ba4d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei
index c298ddd..b92b254 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape
index c298ddd..b92b254 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby
index 920ad27..5d8fc3a 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga
index da6b0fa..7220bda 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan
index 66490d2..9539353 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa
index cb56709..001289c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti
index 442b8eb..50a064f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa
index 3db6c75..6bc2168 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu
index 5553c60..f28c840 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk
index 07c84b7..ea3fb5c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake
index c9e3106..71cca88 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis
index b35344b..4bce893 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap
index 07c84b7..ea3fb5c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Poland b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Poland
index e33cf67..efe1a40 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Poland
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Poland
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal
index 355817b..f0c70b6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROC b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROC
index 24c4344..35d89d0 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROC
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROC
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROK b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROK
index 96199e7..1755147 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROK
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROK
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Singapore b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Singapore
index 2364b21..dbbdea3 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Singapore
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Singapore
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Turkey b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Turkey
index 508446b..c891866 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Turkey
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Turkey
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UCT b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UCT
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UCT
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UCT
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska
index 9bbb2fd..cdf0572 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian
index 4323649..b1497bd 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona
index ac6bb0c..c2bd2f9 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Central b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Central
index a5b1617..b016880 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Central
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Central
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana
index 09511cc..6b08d15 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern
index 2f75480..2b6c2ee 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii
index c7cd060..40e3d49 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke
index fcd408d..b187d5f 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan
index e104faa..6eb3ac4 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain
index 5fbe26b..09e54e5 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific
index 9dad4f4..aaf0778 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa
index cb56709..001289c 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UTC b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UTC
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UTC
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UTC
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Universal b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Universal
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Universal
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Universal
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/W-SU b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/W-SU
index ddb3f4e..5e6b6de 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/W-SU
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/W-SU
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/WET b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/WET
index c27390b..423c6c2 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/WET
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/WET
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Zulu b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Zulu
index 91558be..00841a6 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Zulu
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Zulu
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab
index a4ff61a..be3348d 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab
@@ -3,13 +3,13 @@
 # This file is in the public domain, so clarified as of
 # 2009-05-17 by Arthur David Olson.
 #
-# From Paul Eggert (2015-05-02):
+# From Paul Eggert (2022-11-18):
 # This file contains a table of two-letter country codes.  Columns are
 # separated by a single tab.  Lines beginning with '#' are comments.
 # All text uses UTF-8 encoding.  The columns of the table are as follows:
 #
 # 1.  ISO 3166-1 alpha-2 country code, current as of
-#     ISO 3166-1 N976 (2018-11-06).  See: Updates on ISO 3166-1
+#     ISO 3166-1 N1087 (2022-09-02).  See: Updates on ISO 3166-1
 #     https://isotc.iso.org/livelink/livelink/Open/16944257
 # 2.  The usual English name for the coded region,
 #     chosen so that alphabetic sorting of subsets produces helpful lists.
@@ -238,7 +238,7 @@
 SZ	Eswatini (Swaziland)
 TC	Turks & Caicos Is
 TD	Chad
-TF	French Southern & Antarctic Lands
+TF	French S. Terr.
 TG	Togo
 TH	Thailand
 TJ	Tajikistan
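
The iso3166.tab hunk above also documents the file's layout: one tab-separated row per territory, with the ISO 3166-1 alpha-2 code in column 1 and the usual English name in column 2, and '#' lines as comments. A minimal C++ sketch of reading such a table follows; LoadIso3166 and the idea of loading the file at runtime are illustrative only, not part of Abseil or cctz.

#include <fstream>
#include <map>
#include <string>

// Sketch only: builds a code -> name map from an iso3166.tab-style table.
// LoadIso3166 is a hypothetical helper, not an Abseil or cctz API.
std::map<std::string, std::string> LoadIso3166(const std::string& path) {
  std::map<std::string, std::string> names;
  std::ifstream in(path);
  std::string line;
  while (std::getline(in, line)) {
    if (line.empty() || line[0] == '#') continue;    // '#' lines are comments
    const std::string::size_type tab = line.find('\t');
    if (tab == std::string::npos) continue;          // columns are tab-separated
    names[line.substr(0, tab)] = line.substr(tab + 1);  // e.g. "TD" -> "Chad"
  }
  return names;
}
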
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/localtime b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/localtime
deleted file mode 100644
index afeeb88..0000000
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/localtime
+++ /dev/null
Binary files differ
diff --git a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab
index 53ee77e..1f1cecb 100644
--- a/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab
+++ b/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab
@@ -18,7 +18,10 @@
 #     Please see the theory.html file for how these names are chosen.
 #     If multiple timezones overlap a country, each has a row in the
 #     table, with each column 1 containing the country code.
-# 4.  Comments; present if and only if a country has multiple timezones.
+# 4.  Comments; present if and only if countries have multiple timezones,
+#     and useful only for those countries.  For example, the comments
+#     for the row with countries CH,DE,LI and name Europe/Zurich
+#     are useful only for DE, since CH and LI have no other timezones.
 #
 # If a timezone covers multiple countries, the most-populous city is used,
 # and that country is listed first in column 1; any other countries
@@ -34,21 +37,18 @@
 #country-
 #codes	coordinates	TZ	comments
 AD	+4230+00131	Europe/Andorra
-AE,OM	+2518+05518	Asia/Dubai
+AE,OM,RE,SC,TF	+2518+05518	Asia/Dubai	Crozet, Scattered Is
 AF	+3431+06912	Asia/Kabul
 AL	+4120+01950	Europe/Tirane
 AM	+4011+04430	Asia/Yerevan
 AQ	-6617+11031	Antarctica/Casey	Casey
 AQ	-6835+07758	Antarctica/Davis	Davis
-AQ	-6640+14001	Antarctica/DumontDUrville	Dumont-d'Urville
 AQ	-6736+06253	Antarctica/Mawson	Mawson
 AQ	-6448-06406	Antarctica/Palmer	Palmer
 AQ	-6734-06808	Antarctica/Rothera	Rothera
-AQ	-690022+0393524	Antarctica/Syowa	Syowa
 AQ	-720041+0023206	Antarctica/Troll	Troll
-AQ	-7824+10654	Antarctica/Vostok	Vostok
 AR	-3436-05827	America/Argentina/Buenos_Aires	Buenos Aires (BA, CF)
-AR	-3124-06411	America/Argentina/Cordoba	Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF)
+AR	-3124-06411	America/Argentina/Cordoba	most areas: CB, CC, CN, ER, FM, MN, SE, SF
 AR	-2447-06525	America/Argentina/Salta	Salta (SA, LP, NQ, RN)
 AR	-2411-06518	America/Argentina/Jujuy	Jujuy (JY)
 AR	-2649-06513	America/Argentina/Tucuman	Tucumán (TM)
@@ -59,12 +59,11 @@
 AR	-3319-06621	America/Argentina/San_Luis	San Luis (SL)
 AR	-5138-06913	America/Argentina/Rio_Gallegos	Santa Cruz (SC)
 AR	-5448-06818	America/Argentina/Ushuaia	Tierra del Fuego (TF)
-AS,UM	-1416-17042	Pacific/Pago_Pago	Samoa, Midway
+AS,UM	-1416-17042	Pacific/Pago_Pago	Midway
 AT	+4813+01620	Europe/Vienna
 AU	-3133+15905	Australia/Lord_Howe	Lord Howe Island
 AU	-5430+15857	Antarctica/Macquarie	Macquarie Island
-AU	-4253+14719	Australia/Hobart	Tasmania (most areas)
-AU	-3956+14352	Australia/Currie	Tasmania (King Island)
+AU	-4253+14719	Australia/Hobart	Tasmania
 AU	-3749+14458	Australia/Melbourne	Victoria
 AU	-3352+15113	Australia/Sydney	New South Wales (most areas)
 AU	-3157+14127	Australia/Broken_Hill	New South Wales (Yancowinna)
@@ -77,10 +76,9 @@
 AZ	+4023+04951	Asia/Baku
 BB	+1306-05937	America/Barbados
 BD	+2343+09025	Asia/Dhaka
-BE	+5050+00420	Europe/Brussels
+BE,LU,NL	+5050+00420	Europe/Brussels
 BG	+4241+02319	Europe/Sofia
 BM	+3217-06446	Atlantic/Bermuda
-BN	+0456+11455	Asia/Brunei
 BO	-1630-06809	America/La_Paz
 BR	-0351-03225	America/Noronha	Atlantic islands
 BR	-0127-04829	America/Belem	Pará (east); Amapá
@@ -98,7 +96,6 @@
 BR	-0308-06001	America/Manaus	Amazonas (east)
 BR	-0640-06952	America/Eirunepe	Amazonas (west)
 BR	-0958-06748	America/Rio_Branco	Acre
-BS	+2505-07721	America/Nassau
 BT	+2728+08939	Asia/Thimphu
 BY	+5354+02734	Europe/Minsk
 BZ	+1730-08812	America/Belize
@@ -107,49 +104,37 @@
 CA	+4612-05957	America/Glace_Bay	Atlantic - NS (Cape Breton)
 CA	+4606-06447	America/Moncton	Atlantic - New Brunswick
 CA	+5320-06025	America/Goose_Bay	Atlantic - Labrador (most areas)
-CA	+5125-05707	America/Blanc-Sablon	AST - QC (Lower North Shore)
-CA	+4339-07923	America/Toronto	Eastern - ON, QC (most areas)
-CA	+4901-08816	America/Nipigon	Eastern - ON, QC (no DST 1967-73)
-CA	+4823-08915	America/Thunder_Bay	Eastern - ON (Thunder Bay)
-CA	+6344-06828	America/Iqaluit	Eastern - NU (most east areas)
-CA	+6608-06544	America/Pangnirtung	Eastern - NU (Pangnirtung)
-CA	+484531-0913718	America/Atikokan	EST - ON (Atikokan); NU (Coral H)
+CA,BS	+4339-07923	America/Toronto	Eastern - ON, QC (most areas)
+CA	+6344-06828	America/Iqaluit	Eastern - NU (most areas)
 CA	+4953-09709	America/Winnipeg	Central - ON (west); Manitoba
-CA	+4843-09434	America/Rainy_River	Central - ON (Rainy R, Ft Frances)
 CA	+744144-0944945	America/Resolute	Central - NU (Resolute)
 CA	+624900-0920459	America/Rankin_Inlet	Central - NU (central)
 CA	+5024-10439	America/Regina	CST - SK (most areas)
 CA	+5017-10750	America/Swift_Current	CST - SK (midwest)
-CA	+5333-11328	America/Edmonton	Mountain - AB; BC (E); SK (W)
+CA	+5333-11328	America/Edmonton	Mountain - AB; BC (E); NT (E); SK (W)
 CA	+690650-1050310	America/Cambridge_Bay	Mountain - NU (west)
-CA	+6227-11421	America/Yellowknife	Mountain - NT (central)
 CA	+682059-1334300	America/Inuvik	Mountain - NT (west)
-CA	+4906-11631	America/Creston	MST - BC (Creston)
-CA	+5946-12014	America/Dawson_Creek	MST - BC (Dawson Cr, Ft St John)
+CA	+5546-12014	America/Dawson_Creek	MST - BC (Dawson Cr, Ft St John)
 CA	+5848-12242	America/Fort_Nelson	MST - BC (Ft Nelson)
+CA	+6043-13503	America/Whitehorse	MST - Yukon (east)
+CA	+6404-13925	America/Dawson	MST - Yukon (west)
 CA	+4916-12307	America/Vancouver	Pacific - BC (most areas)
-CA	+6043-13503	America/Whitehorse	Pacific - Yukon (east)
-CA	+6404-13925	America/Dawson	Pacific - Yukon (west)
-CC	-1210+09655	Indian/Cocos
-CH,DE,LI	+4723+00832	Europe/Zurich	Swiss time
-CI,BF,GM,GN,ML,MR,SH,SL,SN,TG	+0519-00402	Africa/Abidjan
+CH,DE,LI	+4723+00832	Europe/Zurich	Büsingen
+CI,BF,GH,GM,GN,IS,ML,MR,SH,SL,SN,TG	+0519-00402	Africa/Abidjan
 CK	-2114-15946	Pacific/Rarotonga
-CL	-3327-07040	America/Santiago	Chile (most areas)
+CL	-3327-07040	America/Santiago	most of Chile
 CL	-5309-07055	America/Punta_Arenas	Region of Magallanes
 CL	-2709-10926	Pacific/Easter	Easter Island
 CN	+3114+12128	Asia/Shanghai	Beijing Time
-CN	+4348+08735	Asia/Urumqi	Xinjiang Time
+CN,AQ	+4348+08735	Asia/Urumqi	Xinjiang Time, Vostok
 CO	+0436-07405	America/Bogota
 CR	+0956-08405	America/Costa_Rica
 CU	+2308-08222	America/Havana
 CV	+1455-02331	Atlantic/Cape_Verde
-CW,AW,BQ,SX	+1211-06900	America/Curacao
-CX	-1025+10543	Indian/Christmas
-CY	+3510+03322	Asia/Nicosia	Cyprus (most areas)
+CY	+3510+03322	Asia/Nicosia	most of Cyprus
 CY	+3507+03357	Asia/Famagusta	Northern Cyprus
 CZ,SK	+5005+01426	Europe/Prague
-DE	+5230+01322	Europe/Berlin	Germany (most areas)
-DK	+5540+01235	Europe/Copenhagen
+DE,DK,NO,SE,SJ	+5230+01322	Europe/Berlin	most of Germany
 DO	+1828-06954	America/Santo_Domingo
 DZ	+3647+00303	Africa/Algiers
 EC	-0210-07950	America/Guayaquil	Ecuador (mainland)
@@ -163,17 +148,14 @@
 FI,AX	+6010+02458	Europe/Helsinki
 FJ	-1808+17825	Pacific/Fiji
 FK	-5142-05751	Atlantic/Stanley
-FM	+0725+15147	Pacific/Chuuk	Chuuk/Truk, Yap
-FM	+0658+15813	Pacific/Pohnpei	Pohnpei/Ponape
 FM	+0519+16259	Pacific/Kosrae	Kosrae
 FO	+6201-00646	Atlantic/Faroe
-FR	+4852+00220	Europe/Paris
+FR,MC	+4852+00220	Europe/Paris
 GB,GG,IM,JE	+513030-0000731	Europe/London
 GE	+4143+04449	Asia/Tbilisi
 GF	+0456-05220	America/Cayenne
-GH	+0533-00013	Africa/Accra
 GI	+3608-00521	Europe/Gibraltar
-GL	+6411-05144	America/Nuuk	Greenland (most areas)
+GL	+6411-05144	America/Nuuk	most of Greenland
 GL	+7646-01840	America/Danmarkshavn	National Park (east coast)
 GL	+7029-02158	America/Scoresbysund	Scoresbysund/Ittoqqortoormiit
 GL	+7634-06847	America/Thule	Thule/Pituffik
@@ -197,19 +179,18 @@
 IO	-0720+07225	Indian/Chagos
 IQ	+3321+04425	Asia/Baghdad
 IR	+3540+05126	Asia/Tehran
-IS	+6409-02151	Atlantic/Reykjavik
 IT,SM,VA	+4154+01229	Europe/Rome
 JM	+175805-0764736	America/Jamaica
 JO	+3157+03556	Asia/Amman
 JP	+353916+1394441	Asia/Tokyo
 KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT	-0117+03649	Africa/Nairobi
 KG	+4254+07436	Asia/Bishkek
-KI	+0125+17300	Pacific/Tarawa	Gilbert Islands
-KI	-0308-17105	Pacific/Enderbury	Phoenix Islands
+KI,MH,TV,UM,WF	+0125+17300	Pacific/Tarawa	Gilberts, Marshalls, Wake
+KI	-0247-17143	Pacific/Kanton	Phoenix Islands
 KI	+0152-15720	Pacific/Kiritimati	Line Islands
 KP	+3901+12545	Asia/Pyongyang
 KR	+3733+12658	Asia/Seoul
-KZ	+4315+07657	Asia/Almaty	Kazakhstan (most areas)
+KZ	+4315+07657	Asia/Almaty	most of Kazakhstan
 KZ	+4448+06528	Asia/Qyzylorda	Qyzylorda/Kyzylorda/Kzyl-Orda
 KZ	+5312+06337	Asia/Qostanay	Qostanay/Kostanay/Kustanay
 KZ	+5017+05710	Asia/Aqtobe	Aqtöbe/Aktobe
@@ -220,62 +201,57 @@
 LK	+0656+07951	Asia/Colombo
 LR	+0618-01047	Africa/Monrovia
 LT	+5441+02519	Europe/Vilnius
-LU	+4936+00609	Europe/Luxembourg
 LV	+5657+02406	Europe/Riga
 LY	+3254+01311	Africa/Tripoli
 MA	+3339-00735	Africa/Casablanca
-MC	+4342+00723	Europe/Monaco
 MD	+4700+02850	Europe/Chisinau
-MH	+0709+17112	Pacific/Majuro	Marshall Islands (most areas)
 MH	+0905+16720	Pacific/Kwajalein	Kwajalein
-MM	+1647+09610	Asia/Yangon
-MN	+4755+10653	Asia/Ulaanbaatar	Mongolia (most areas)
+MM,CC	+1647+09610	Asia/Yangon
+MN	+4755+10653	Asia/Ulaanbaatar	most of Mongolia
 MN	+4801+09139	Asia/Hovd	Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan
 MN	+4804+11430	Asia/Choibalsan	Dornod, Sükhbaatar
 MO	+221150+1133230	Asia/Macau
 MQ	+1436-06105	America/Martinique
 MT	+3554+01431	Europe/Malta
 MU	-2010+05730	Indian/Mauritius
-MV	+0410+07330	Indian/Maldives
-MX	+1924-09909	America/Mexico_City	Central Time
-MX	+2105-08646	America/Cancun	Eastern Standard Time - Quintana Roo
-MX	+2058-08937	America/Merida	Central Time - Campeche, Yucatán
-MX	+2540-10019	America/Monterrey	Central Time - Durango; Coahuila, Nuevo León, Tamaulipas (most areas)
-MX	+2550-09730	America/Matamoros	Central Time US - Coahuila, Nuevo León, Tamaulipas (US border)
-MX	+2313-10625	America/Mazatlan	Mountain Time - Baja California Sur, Nayarit, Sinaloa
-MX	+2838-10605	America/Chihuahua	Mountain Time - Chihuahua (most areas)
-MX	+2934-10425	America/Ojinaga	Mountain Time US - Chihuahua (US border)
-MX	+2904-11058	America/Hermosillo	Mountain Standard Time - Sonora
-MX	+3232-11701	America/Tijuana	Pacific Time US - Baja California
-MX	+2048-10515	America/Bahia_Banderas	Central Time - Bahía de Banderas
-MY	+0310+10142	Asia/Kuala_Lumpur	Malaysia (peninsula)
-MY	+0133+11020	Asia/Kuching	Sabah, Sarawak
+MV,TF	+0410+07330	Indian/Maldives	Kerguelen, St Paul I, Amsterdam I
+MX	+1924-09909	America/Mexico_City	Central Mexico
+MX	+2105-08646	America/Cancun	Quintana Roo
+MX	+2058-08937	America/Merida	Campeche, Yucatán
+MX	+2540-10019	America/Monterrey	Durango; Coahuila, Nuevo León, Tamaulipas (most areas)
+MX	+2550-09730	America/Matamoros	Coahuila, Nuevo León, Tamaulipas (US border)
+MX	+2838-10605	America/Chihuahua	Chihuahua (most areas)
+MX	+3144-10629	America/Ciudad_Juarez	Chihuahua (US border - west)
+MX	+2934-10425	America/Ojinaga	Chihuahua (US border - east)
+MX	+2313-10625	America/Mazatlan	Baja California Sur, Nayarit (most areas), Sinaloa
+MX	+2048-10515	America/Bahia_Banderas	Bahía de Banderas
+MX	+2904-11058	America/Hermosillo	Sonora
+MX	+3232-11701	America/Tijuana	Baja California
+MY,BN	+0133+11020	Asia/Kuching	Sabah, Sarawak
 MZ,BI,BW,CD,MW,RW,ZM,ZW	-2558+03235	Africa/Maputo	Central Africa Time
 NA	-2234+01706	Africa/Windhoek
 NC	-2216+16627	Pacific/Noumea
 NF	-2903+16758	Pacific/Norfolk
 NG,AO,BJ,CD,CF,CG,CM,GA,GQ,NE	+0627+00324	Africa/Lagos	West Africa Time
 NI	+1209-08617	America/Managua
-NL	+5222+00454	Europe/Amsterdam
-NO,SJ	+5955+01045	Europe/Oslo
 NP	+2743+08519	Asia/Kathmandu
 NR	-0031+16655	Pacific/Nauru
 NU	-1901-16955	Pacific/Niue
 NZ,AQ	-3652+17446	Pacific/Auckland	New Zealand time
 NZ	-4357-17633	Pacific/Chatham	Chatham Islands
-PA,KY	+0858-07932	America/Panama
+PA,CA,KY	+0858-07932	America/Panama	EST - ON (Atikokan), NU (Coral H)
 PE	-1203-07703	America/Lima
 PF	-1732-14934	Pacific/Tahiti	Society Islands
 PF	-0900-13930	Pacific/Marquesas	Marquesas Islands
 PF	-2308-13457	Pacific/Gambier	Gambier Islands
-PG	-0930+14710	Pacific/Port_Moresby	Papua New Guinea (most areas)
+PG,AQ,FM	-0930+14710	Pacific/Port_Moresby	Papua New Guinea (most areas), Chuuk, Yap, Dumont d'Urville
 PG	-0613+15534	Pacific/Bougainville	Bougainville
 PH	+1435+12100	Asia/Manila
 PK	+2452+06703	Asia/Karachi
 PL	+5215+02100	Europe/Warsaw
 PM	+4703-05620	America/Miquelon
 PN	-2504-13005	Pacific/Pitcairn
-PR	+182806-0660622	America/Puerto_Rico
+PR,AG,CA,AI,AW,BL,BQ,CW,DM,GD,GP,KN,LC,MF,MS,SX,TT,VC,VG,VI	+182806-0660622	America/Puerto_Rico	AST
 PS	+3130+03428	Asia/Gaza	Gaza Strip
 PS	+313200+0350542	Asia/Hebron	West Bank
 PT	+3843-00908	Europe/Lisbon	Portugal (mainland)
@@ -284,7 +260,6 @@
 PW	+0720+13429	Pacific/Palau
 PY	-2516-05740	America/Asuncion
 QA,BH	+2517+05132	Asia/Qatar
-RE,TF	-2052+05528	Indian/Reunion	Réunion, Crozet, Scattered Islands
 RO	+4426+02606	Europe/Bucharest
 RS,BA,HR,ME,MK,SI	+4450+02030	Europe/Belgrade
 RU	+5443+02030	Europe/Kaliningrad	MSK-01 - Kaliningrad
@@ -292,8 +267,8 @@
 # Mention RU and UA alphabetically.  See "territorial claims" above.
 RU,UA	+4457+03406	Europe/Simferopol	Crimea
 RU	+5836+04939	Europe/Kirov	MSK+00 - Kirov
+RU	+4844+04425	Europe/Volgograd	MSK+00 - Volgograd
 RU	+4621+04803	Europe/Astrakhan	MSK+01 - Astrakhan
-RU	+4844+04425	Europe/Volgograd	MSK+01 - Volgograd
 RU	+5134+04602	Europe/Saratov	MSK+01 - Saratov
 RU	+5420+04824	Europe/Ulyanovsk	MSK+01 - Ulyanovsk
 RU	+5312+05009	Europe/Samara	MSK+01 - Samara, Udmurtia
@@ -312,15 +287,13 @@
 RU	+643337+1431336	Asia/Ust-Nera	MSK+07 - Oymyakonsky
 RU	+5934+15048	Asia/Magadan	MSK+08 - Magadan
 RU	+4658+14242	Asia/Sakhalin	MSK+08 - Sakhalin Island
-RU	+6728+15343	Asia/Srednekolymsk	MSK+08 - Sakha (E); North Kuril Is
+RU	+6728+15343	Asia/Srednekolymsk	MSK+08 - Sakha (E); N Kuril Is
 RU	+5301+15839	Asia/Kamchatka	MSK+09 - Kamchatka
 RU	+6445+17729	Asia/Anadyr	MSK+09 - Bering Sea
-SA,KW,YE	+2438+04643	Asia/Riyadh
-SB	-0932+16012	Pacific/Guadalcanal
-SC	-0440+05528	Indian/Mahe
+SA,AQ,KW,YE	+2438+04643	Asia/Riyadh	Syowa
+SB,FM	-0932+16012	Pacific/Guadalcanal	Pohnpei
 SD	+1536+03232	Africa/Khartoum
-SE	+5920+01803	Europe/Stockholm
-SG	+0117+10351	Asia/Singapore
+SG,MY	+0117+10351	Asia/Singapore	peninsular Malaysia
 SR	+0550-05510	America/Paramaribo
 SS	+0451+03137	Africa/Juba
 ST	+0020+00644	Africa/Sao_Tome
@@ -328,22 +301,16 @@
 SY	+3330+03618	Asia/Damascus
 TC	+2128-07108	America/Grand_Turk
 TD	+1207+01503	Africa/Ndjamena
-TF	-492110+0701303	Indian/Kerguelen	Kerguelen, St Paul Island, Amsterdam Island
-TH,KH,LA,VN	+1345+10031	Asia/Bangkok	Indochina (most areas)
+TH,CX,KH,LA,VN	+1345+10031	Asia/Bangkok	north Vietnam
 TJ	+3835+06848	Asia/Dushanbe
 TK	-0922-17114	Pacific/Fakaofo
 TL	-0833+12535	Asia/Dili
 TM	+3757+05823	Asia/Ashgabat
 TN	+3648+01011	Africa/Tunis
-TO	-2110-17510	Pacific/Tongatapu
+TO	-210800-1751200	Pacific/Tongatapu
 TR	+4101+02858	Europe/Istanbul
-TT,AG,AI,BL,DM,GD,GP,KN,LC,MF,MS,VC,VG,VI	+1039-06131	America/Port_of_Spain
-TV	-0831+17913	Pacific/Funafuti
 TW	+2503+12130	Asia/Taipei
-UA	+5026+03031	Europe/Kiev	Ukraine (most areas)
-UA	+4837+02218	Europe/Uzhgorod	Transcarpathia
-UA	+4750+03510	Europe/Zaporozhye	Zaporozhye and east Lugansk
-UM	+1917+16637	Pacific/Wake	Wake Island
+UA	+5026+03031	Europe/Kyiv	most of Ukraine
 US	+404251-0740023	America/New_York	Eastern (most areas)
 US	+421953-0830245	America/Detroit	Eastern - MI (most areas)
 US	+381515-0854534	America/Kentucky/Louisville	Eastern - KY (Louisville area)
@@ -363,7 +330,7 @@
 US	+471551-1014640	America/North_Dakota/Beulah	Central - ND (Mercer)
 US	+394421-1045903	America/Denver	Mountain (most areas)
 US	+433649-1161209	America/Boise	Mountain - ID (south); OR (east)
-US	+332654-1120424	America/Phoenix	MST - Arizona (except Navajo)
+US,CA	+332654-1120424	America/Phoenix	MST - AZ (most areas), Creston BC
 US	+340308-1181434	America/Los_Angeles	Pacific
 US	+611305-1495401	America/Anchorage	Alaska (most areas)
 US	+581807-1342511	America/Juneau	Alaska - Juneau area
@@ -371,14 +338,37 @@
 US	+550737-1313435	America/Metlakatla	Alaska - Annette Island
 US	+593249-1394338	America/Yakutat	Alaska - Yakutat
 US	+643004-1652423	America/Nome	Alaska (west)
-US	+515248-1763929	America/Adak	Aleutian Islands
-US,UM	+211825-1575130	Pacific/Honolulu	Hawaii
+US	+515248-1763929	America/Adak	Alaska - western Aleutians
+US	+211825-1575130	Pacific/Honolulu	Hawaii
 UY	-345433-0561245	America/Montevideo
 UZ	+3940+06648	Asia/Samarkand	Uzbekistan (west)
 UZ	+4120+06918	Asia/Tashkent	Uzbekistan (east)
 VE	+1030-06656	America/Caracas
-VN	+1045+10640	Asia/Ho_Chi_Minh	Vietnam (south)
+VN	+1045+10640	Asia/Ho_Chi_Minh	south Vietnam
 VU	-1740+16825	Pacific/Efate
-WF	-1318-17610	Pacific/Wallis
 WS	-1350-17144	Pacific/Apia
 ZA,LS,SZ	-2615+02800	Africa/Johannesburg
+#
+# The next section contains experimental tab-separated comments for
+# use by user agents like tzselect that identify continents and oceans.
+#
+# For example, the comment "#@AQ<tab>Antarctica/" means the country code
+# AQ is in the continent Antarctica regardless of the Zone name,
+# so Pacific/Auckland should be listed under Antarctica as well as
+# under the Pacific because its line's country codes include AQ.
+#
+# If more than one country code is affected each is listed separated
+# by commas, e.g., #@IS,SH<tab>Atlantic/".  If a country code is in
+# more than one continent or ocean, each is listed separated by
+# commas, e.g., the second column of "#@CY,TR<tab>Asia/,Europe/".
+#
+# These experimental comments are present only for country codes where
+# the continent or ocean is not already obvious from the Zone name.
+# For example, there is no such comment for RU since it already
+# corresponds to Zone names starting with both "Europe/" and "Asia/".
+#
+#@AQ	Antarctica/
+#@IS,SH	Atlantic/
+#@CY,TR	Asia/,Europe/
+#@SJ	Arctic/
+#@CC,CX,KM,MG,YT	Indian/
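
The zone1970.tab hunks above both reshuffle the data rows (column 1 is a comma-separated country-code list, column 3 the zone name) and add the experimental "#@" continent/ocean comments. A hedged sketch of the lookup those comments describe — deciding which continents and oceans a zone row should be listed under — is below; ListingsFor and the hard-coded override table are illustrative, not tzdb or Abseil API, and the table copies only the "#@" rows shown in the hunk.

#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

// Sketch only: mirrors the "#@" semantics described in the comments above.
// Overrides taken from the "#@" lines in this hunk (some rows omitted).
const std::vector<std::pair<std::string, std::string>> kContinentOverrides = {
    {"AQ", "Antarctica/"},   {"IS", "Atlantic/"},     {"SH", "Atlantic/"},
    {"CY", "Asia/,Europe/"}, {"TR", "Asia/,Europe/"}, {"SJ", "Arctic/"},
};

// Returns every continent/ocean prefix a zone1970.tab row should appear
// under: the zone name's own prefix plus any override for its countries.
std::set<std::string> ListingsFor(const std::string& zone_name,
                                  const std::string& country_codes) {
  std::set<std::string> out;
  out.insert(zone_name.substr(0, zone_name.find('/') + 1));  // e.g. "Pacific/"
  std::istringstream codes(country_codes);
  std::string code;
  while (std::getline(codes, code, ',')) {      // column 1 is comma-separated
    for (const auto& kv : kContinentOverrides) {
      if (kv.first != code) continue;
      std::istringstream prefixes(kv.second);
      std::string prefix;
      while (std::getline(prefixes, prefix, ',')) out.insert(prefix);
    }
  }
  return out;
}

// Per the example in the comments: ListingsFor("Pacific/Auckland", "NZ,AQ")
// yields {"Antarctica/", "Pacific/"}.
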
diff --git a/abseil-cpp/absl/time/internal/test_util.cc b/abseil-cpp/absl/time/internal/test_util.cc
index 9a485a0..3e2452e 100644
--- a/abseil-cpp/absl/time/internal/test_util.cc
+++ b/abseil-cpp/absl/time/internal/test_util.cc
@@ -14,15 +14,8 @@
 
 #include "absl/time/internal/test_util.h"
 
-#include <algorithm>
-#include <cstddef>
-#include <cstring>
-
 #include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
-#include "absl/time/internal/cctz/include/cctz/zone_info_source.h"
-
-namespace cctz = absl::time_internal::cctz;
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -37,95 +30,3 @@
 }  // namespace time_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace time_internal {
-namespace cctz_extension {
-namespace {
-
-// Embed the zoneinfo data for time zones used during tests and benchmarks.
-// The data was generated using "xxd -i zoneinfo-file".  There is no need
-// to update the data as long as the tests do not depend on recent changes
-// (and the past rules remain the same).
-#include "absl/time/internal/zoneinfo.inc"
-
-const struct ZoneInfo {
-  const char* name;
-  const char* data;
-  std::size_t length;
-} kZoneInfo[] = {
-    // The three real time zones used by :time_test and :time_benchmark.
-    {"America/Los_Angeles",  //
-     reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
-    {"America/New_York",  //
-     reinterpret_cast<char*>(America_New_York), America_New_York_len},
-    {"Australia/Sydney",  //
-     reinterpret_cast<char*>(Australia_Sydney), Australia_Sydney_len},
-
-    // Other zones named in tests but which should fail to load.
-    {"Invalid/TimeZone", nullptr, 0},
-    {"", nullptr, 0},
-
-    // Also allow for loading the local time zone under TZ=US/Pacific.
-    {"US/Pacific",  //
-     reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
-
-    // Allows use of the local time zone from a system-specific location.
-#ifdef _MSC_VER
-    {"localtime",  //
-     reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
-#else
-    {"/etc/localtime",  //
-     reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
-#endif
-};
-
-class TestZoneInfoSource : public cctz::ZoneInfoSource {
- public:
-  TestZoneInfoSource(const char* data, std::size_t size)
-      : data_(data), end_(data + size) {}
-
-  std::size_t Read(void* ptr, std::size_t size) override {
-    const std::size_t len = std::min<std::size_t>(size, end_ - data_);
-    memcpy(ptr, data_, len);
-    data_ += len;
-    return len;
-  }
-
-  int Skip(std::size_t offset) override {
-    data_ += std::min<std::size_t>(offset, end_ - data_);
-    return 0;
-  }
-
- private:
-  const char* data_;
-  const char* const end_;
-};
-
-std::unique_ptr<cctz::ZoneInfoSource> TestFactory(
-    const std::string& name,
-    const std::function<std::unique_ptr<cctz::ZoneInfoSource>(
-        const std::string& name)>& /*fallback_factory*/) {
-  for (const ZoneInfo& zoneinfo : kZoneInfo) {
-    if (name == zoneinfo.name) {
-      if (zoneinfo.data == nullptr) return nullptr;
-      return std::unique_ptr<cctz::ZoneInfoSource>(
-          new TestZoneInfoSource(zoneinfo.data, zoneinfo.length));
-    }
-  }
-  ABSL_RAW_LOG(FATAL, "Unexpected time zone \"%s\" in test", name.c_str());
-  return nullptr;
-}
-
-}  // namespace
-
-#if !defined(__MINGW32__)
-// MinGW does not support the weak symbol extension mechanism.
-ZoneInfoSourceFactory zone_info_source_factory = TestFactory;
-#endif
-
-}  // namespace cctz_extension
-}  // namespace time_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
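The block deleted above was the test-only hook that fed embedded TZif buffers to cctz through the cctz_extension::zone_info_source_factory weak symbol. For reference, a minimal sketch of that extension point, modeled on the deleted code rather than on anything this patch adds, looks like the following; the pass-through behavior and the PassThroughFactory name are illustrative only:

#include <functional>
#include <memory>
#include <string>

#include "absl/base/config.h"
#include "absl/time/internal/cctz/include/cctz/zone_info_source.h"

namespace cctz = absl::time_internal::cctz;

// Defers every zone lookup to the default (fallback) source, i.e. the
// regular zoneinfo database, instead of serving embedded test data.
std::unique_ptr<cctz::ZoneInfoSource> PassThroughFactory(
    const std::string& name,
    const std::function<std::unique_ptr<cctz::ZoneInfoSource>(
        const std::string& name)>& fallback_factory) {
  return fallback_factory(name);
}

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz_extension {
#if !defined(__MINGW32__)
// Overrides the weak default factory, as the deleted test code did.
ZoneInfoSourceFactory zone_info_source_factory = PassThroughFactory;
#endif
}  // namespace cctz_extension
}  // namespace time_internal
ABSL_NAMESPACE_END
}  // namespace absl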
diff --git a/abseil-cpp/absl/time/internal/zoneinfo.inc b/abseil-cpp/absl/time/internal/zoneinfo.inc
deleted file mode 100644
index bfed829..0000000
--- a/abseil-cpp/absl/time/internal/zoneinfo.inc
+++ /dev/null
@@ -1,729 +0,0 @@
-unsigned char America_Los_Angeles[] = {
-  0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00,
-  0x9e, 0xa6, 0x48, 0xa0, 0x9f, 0xbb, 0x15, 0x90, 0xa0, 0x86, 0x2a, 0xa0,
-  0xa1, 0x9a, 0xf7, 0x90, 0xcb, 0x89, 0x1a, 0xa0, 0xd2, 0x23, 0xf4, 0x70,
-  0xd2, 0x61, 0x26, 0x10, 0xd6, 0xfe, 0x74, 0x5c, 0xd8, 0x80, 0xad, 0x90,
-  0xda, 0xfe, 0xc3, 0x90, 0xdb, 0xc0, 0x90, 0x10, 0xdc, 0xde, 0xa5, 0x90,
-  0xdd, 0xa9, 0xac, 0x90, 0xde, 0xbe, 0x87, 0x90, 0xdf, 0x89, 0x8e, 0x90,
-  0xe0, 0x9e, 0x69, 0x90, 0xe1, 0x69, 0x70, 0x90, 0xe2, 0x7e, 0x4b, 0x90,
-  0xe3, 0x49, 0x52, 0x90, 0xe4, 0x5e, 0x2d, 0x90, 0xe5, 0x29, 0x34, 0x90,
-  0xe6, 0x47, 0x4a, 0x10, 0xe7, 0x12, 0x51, 0x10, 0xe8, 0x27, 0x2c, 0x10,
-  0xe8, 0xf2, 0x33, 0x10, 0xea, 0x07, 0x0e, 0x10, 0xea, 0xd2, 0x15, 0x10,
-  0xeb, 0xe6, 0xf0, 0x10, 0xec, 0xb1, 0xf7, 0x10, 0xed, 0xc6, 0xd2, 0x10,
-  0xee, 0x91, 0xd9, 0x10, 0xef, 0xaf, 0xee, 0x90, 0xf0, 0x71, 0xbb, 0x10,
-  0xf1, 0x8f, 0xd0, 0x90, 0xf2, 0x7f, 0xc1, 0x90, 0xf3, 0x6f, 0xb2, 0x90,
-  0xf4, 0x5f, 0xa3, 0x90, 0xf5, 0x4f, 0x94, 0x90, 0xf6, 0x3f, 0x85, 0x90,
-  0xf7, 0x2f, 0x76, 0x90, 0xf8, 0x28, 0xa2, 0x10, 0xf9, 0x0f, 0x58, 0x90,
-  0xfa, 0x08, 0x84, 0x10, 0xfa, 0xf8, 0x83, 0x20, 0xfb, 0xe8, 0x66, 0x10,
-  0xfc, 0xd8, 0x65, 0x20, 0xfd, 0xc8, 0x48, 0x10, 0xfe, 0xb8, 0x47, 0x20,
-  0xff, 0xa8, 0x2a, 0x10, 0x00, 0x98, 0x29, 0x20, 0x01, 0x88, 0x0c, 0x10,
-  0x02, 0x78, 0x0b, 0x20, 0x03, 0x71, 0x28, 0x90, 0x04, 0x61, 0x27, 0xa0,
-  0x05, 0x51, 0x0a, 0x90, 0x06, 0x41, 0x09, 0xa0, 0x07, 0x30, 0xec, 0x90,
-  0x07, 0x8d, 0x43, 0xa0, 0x09, 0x10, 0xce, 0x90, 0x09, 0xad, 0xbf, 0x20,
-  0x0a, 0xf0, 0xb0, 0x90, 0x0b, 0xe0, 0xaf, 0xa0, 0x0c, 0xd9, 0xcd, 0x10,
-  0x0d, 0xc0, 0x91, 0xa0, 0x0e, 0xb9, 0xaf, 0x10, 0x0f, 0xa9, 0xae, 0x20,
-  0x10, 0x99, 0x91, 0x10, 0x11, 0x89, 0x90, 0x20, 0x12, 0x79, 0x73, 0x10,
-  0x13, 0x69, 0x72, 0x20, 0x14, 0x59, 0x55, 0x10, 0x15, 0x49, 0x54, 0x20,
-  0x16, 0x39, 0x37, 0x10, 0x17, 0x29, 0x36, 0x20, 0x18, 0x22, 0x53, 0x90,
-  0x19, 0x09, 0x18, 0x20, 0x1a, 0x02, 0x35, 0x90, 0x1a, 0xf2, 0x34, 0xa0,
-  0x1b, 0xe2, 0x17, 0x90, 0x1c, 0xd2, 0x16, 0xa0, 0x1d, 0xc1, 0xf9, 0x90,
-  0x1e, 0xb1, 0xf8, 0xa0, 0x1f, 0xa1, 0xdb, 0x90, 0x20, 0x76, 0x2b, 0x20,
-  0x21, 0x81, 0xbd, 0x90, 0x22, 0x56, 0x0d, 0x20, 0x23, 0x6a, 0xda, 0x10,
-  0x24, 0x35, 0xef, 0x20, 0x25, 0x4a, 0xbc, 0x10, 0x26, 0x15, 0xd1, 0x20,
-  0x27, 0x2a, 0x9e, 0x10, 0x27, 0xfe, 0xed, 0xa0, 0x29, 0x0a, 0x80, 0x10,
-  0x29, 0xde, 0xcf, 0xa0, 0x2a, 0xea, 0x62, 0x10, 0x2b, 0xbe, 0xb1, 0xa0,
-  0x2c, 0xd3, 0x7e, 0x90, 0x2d, 0x9e, 0x93, 0xa0, 0x2e, 0xb3, 0x60, 0x90,
-  0x2f, 0x7e, 0x75, 0xa0, 0x30, 0x93, 0x42, 0x90, 0x31, 0x67, 0x92, 0x20,
-  0x32, 0x73, 0x24, 0x90, 0x33, 0x47, 0x74, 0x20, 0x34, 0x53, 0x06, 0x90,
-  0x35, 0x27, 0x56, 0x20, 0x36, 0x32, 0xe8, 0x90, 0x37, 0x07, 0x38, 0x20,
-  0x38, 0x1c, 0x05, 0x10, 0x38, 0xe7, 0x1a, 0x20, 0x39, 0xfb, 0xe7, 0x10,
-  0x3a, 0xc6, 0xfc, 0x20, 0x3b, 0xdb, 0xc9, 0x10, 0x3c, 0xb0, 0x18, 0xa0,
-  0x3d, 0xbb, 0xab, 0x10, 0x3e, 0x8f, 0xfa, 0xa0, 0x3f, 0x9b, 0x8d, 0x10,
-  0x40, 0x6f, 0xdc, 0xa0, 0x41, 0x84, 0xa9, 0x90, 0x42, 0x4f, 0xbe, 0xa0,
-  0x43, 0x64, 0x8b, 0x90, 0x44, 0x2f, 0xa0, 0xa0, 0x45, 0x44, 0x6d, 0x90,
-  0x45, 0xf3, 0xd3, 0x20, 0x47, 0x2d, 0x8a, 0x10, 0x47, 0xd3, 0xb5, 0x20,
-  0x49, 0x0d, 0x6c, 0x10, 0x49, 0xb3, 0x97, 0x20, 0x4a, 0xed, 0x4e, 0x10,
-  0x4b, 0x9c, 0xb3, 0xa0, 0x4c, 0xd6, 0x6a, 0x90, 0x4d, 0x7c, 0x95, 0xa0,
-  0x4e, 0xb6, 0x4c, 0x90, 0x4f, 0x5c, 0x77, 0xa0, 0x50, 0x96, 0x2e, 0x90,
-  0x51, 0x3c, 0x59, 0xa0, 0x52, 0x76, 0x10, 0x90, 0x53, 0x1c, 0x3b, 0xa0,
-  0x54, 0x55, 0xf2, 0x90, 0x54, 0xfc, 0x1d, 0xa0, 0x56, 0x35, 0xd4, 0x90,
-  0x56, 0xe5, 0x3a, 0x20, 0x58, 0x1e, 0xf1, 0x10, 0x58, 0xc5, 0x1c, 0x20,
-  0x59, 0xfe, 0xd3, 0x10, 0x5a, 0xa4, 0xfe, 0x20, 0x5b, 0xde, 0xb5, 0x10,
-  0x5c, 0x84, 0xe0, 0x20, 0x5d, 0xbe, 0x97, 0x10, 0x5e, 0x64, 0xc2, 0x20,
-  0x5f, 0x9e, 0x79, 0x10, 0x60, 0x4d, 0xde, 0xa0, 0x61, 0x87, 0x95, 0x90,
-  0x62, 0x2d, 0xc0, 0xa0, 0x63, 0x67, 0x77, 0x90, 0x64, 0x0d, 0xa2, 0xa0,
-  0x65, 0x47, 0x59, 0x90, 0x65, 0xed, 0x84, 0xa0, 0x67, 0x27, 0x3b, 0x90,
-  0x67, 0xcd, 0x66, 0xa0, 0x69, 0x07, 0x1d, 0x90, 0x69, 0xad, 0x48, 0xa0,
-  0x6a, 0xe6, 0xff, 0x90, 0x6b, 0x96, 0x65, 0x20, 0x6c, 0xd0, 0x1c, 0x10,
-  0x6d, 0x76, 0x47, 0x20, 0x6e, 0xaf, 0xfe, 0x10, 0x6f, 0x56, 0x29, 0x20,
-  0x70, 0x8f, 0xe0, 0x10, 0x71, 0x36, 0x0b, 0x20, 0x72, 0x6f, 0xc2, 0x10,
-  0x73, 0x15, 0xed, 0x20, 0x74, 0x4f, 0xa4, 0x10, 0x74, 0xff, 0x09, 0xa0,
-  0x76, 0x38, 0xc0, 0x90, 0x76, 0xde, 0xeb, 0xa0, 0x78, 0x18, 0xa2, 0x90,
-  0x78, 0xbe, 0xcd, 0xa0, 0x79, 0xf8, 0x84, 0x90, 0x7a, 0x9e, 0xaf, 0xa0,
-  0x7b, 0xd8, 0x66, 0x90, 0x7c, 0x7e, 0x91, 0xa0, 0x7d, 0xb8, 0x48, 0x90,
-  0x7e, 0x5e, 0x73, 0xa0, 0x7f, 0x98, 0x2a, 0x90, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90,
-  0x01, 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90,
-  0x01, 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00,
-  0x50, 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00,
-  0x50, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-  0x00, 0x01, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0xbb, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x04,
-  0x1a, 0xc0, 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x48, 0xa0, 0xff, 0xff,
-  0xff, 0xff, 0x9f, 0xbb, 0x15, 0x90, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86,
-  0x2a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xf7, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xcb, 0x89, 0x1a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x23,
-  0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x61, 0x26, 0x10, 0xff, 0xff,
-  0xff, 0xff, 0xd6, 0xfe, 0x74, 0x5c, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x80,
-  0xad, 0x90, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xc3, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xdb, 0xc0, 0x90, 0x10, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xde,
-  0xa5, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0xac, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xde, 0xbe, 0x87, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x89,
-  0x8e, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x69, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xe1, 0x69, 0x70, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e,
-  0x4b, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x52, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xe4, 0x5e, 0x2d, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe5, 0x29,
-  0x34, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x4a, 0x10, 0xff, 0xff,
-  0xff, 0xff, 0xe7, 0x12, 0x51, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x27,
-  0x2c, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xf2, 0x33, 0x10, 0xff, 0xff,
-  0xff, 0xff, 0xea, 0x07, 0x0e, 0x10, 0xff, 0xff, 0xff, 0xff, 0xea, 0xd2,
-  0x15, 0x10, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xf0, 0x10, 0xff, 0xff,
-  0xff, 0xff, 0xec, 0xb1, 0xf7, 0x10, 0xff, 0xff, 0xff, 0xff, 0xed, 0xc6,
-  0xd2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xee, 0x91, 0xd9, 0x10, 0xff, 0xff,
-  0xff, 0xff, 0xef, 0xaf, 0xee, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x71,
-  0xbb, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xd0, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xf2, 0x7f, 0xc1, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x6f,
-  0xb2, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0xa3, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xf5, 0x4f, 0x94, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x3f,
-  0x85, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x76, 0x90, 0xff, 0xff,
-  0xff, 0xff, 0xf8, 0x28, 0xa2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x0f,
-  0x58, 0x90, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x84, 0x10, 0xff, 0xff,
-  0xff, 0xff, 0xfa, 0xf8, 0x83, 0x20, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe8,
-  0x66, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x65, 0x20, 0xff, 0xff,
-  0xff, 0xff, 0xfd, 0xc8, 0x48, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb8,
-  0x47, 0x20, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x2a, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x98, 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88,
-  0x0c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x0b, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x03, 0x71, 0x28, 0x90, 0x00, 0x00, 0x00, 0x00, 0x04, 0x61,
-  0x27, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x05, 0x51, 0x0a, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x06, 0x41, 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x30,
-  0xec, 0x90, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x43, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x09, 0x10, 0xce, 0x90, 0x00, 0x00, 0x00, 0x00, 0x09, 0xad,
-  0xbf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0xb0, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x0b, 0xe0, 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd9,
-  0xcd, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x91, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x0e, 0xb9, 0xaf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xa9,
-  0xae, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x91, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x11, 0x89, 0x90, 0x20, 0x00, 0x00, 0x00, 0x00, 0x12, 0x79,
-  0x73, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x72, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x14, 0x59, 0x55, 0x10, 0x00, 0x00, 0x00, 0x00, 0x15, 0x49,
-  0x54, 0x20, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x37, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x17, 0x29, 0x36, 0x20, 0x00, 0x00, 0x00, 0x00, 0x18, 0x22,
-  0x53, 0x90, 0x00, 0x00, 0x00, 0x00, 0x19, 0x09, 0x18, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x1a, 0x02, 0x35, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf2,
-  0x34, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe2, 0x17, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x1c, 0xd2, 0x16, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1,
-  0xf9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xf8, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x1f, 0xa1, 0xdb, 0x90, 0x00, 0x00, 0x00, 0x00, 0x20, 0x76,
-  0x2b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0xbd, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x22, 0x56, 0x0d, 0x20, 0x00, 0x00, 0x00, 0x00, 0x23, 0x6a,
-  0xda, 0x10, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xef, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x25, 0x4a, 0xbc, 0x10, 0x00, 0x00, 0x00, 0x00, 0x26, 0x15,
-  0xd1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x9e, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x27, 0xfe, 0xed, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x29, 0x0a,
-  0x80, 0x10, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xcf, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x2a, 0xea, 0x62, 0x10, 0x00, 0x00, 0x00, 0x00, 0x2b, 0xbe,
-  0xb1, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x7e, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x2d, 0x9e, 0x93, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb3,
-  0x60, 0x90, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x75, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x30, 0x93, 0x42, 0x90, 0x00, 0x00, 0x00, 0x00, 0x31, 0x67,
-  0x92, 0x20, 0x00, 0x00, 0x00, 0x00, 0x32, 0x73, 0x24, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x33, 0x47, 0x74, 0x20, 0x00, 0x00, 0x00, 0x00, 0x34, 0x53,
-  0x06, 0x90, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x56, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x36, 0x32, 0xe8, 0x90, 0x00, 0x00, 0x00, 0x00, 0x37, 0x07,
-  0x38, 0x20, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1c, 0x05, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x38, 0xe7, 0x1a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x39, 0xfb,
-  0xe7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xfc, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x3b, 0xdb, 0xc9, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb0,
-  0x18, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0xab, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x3e, 0x8f, 0xfa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9b,
-  0x8d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xdc, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x41, 0x84, 0xa9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x42, 0x4f,
-  0xbe, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x8b, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x44, 0x2f, 0xa0, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x45, 0x44,
-  0x6d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xd3, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x47, 0x2d, 0x8a, 0x10, 0x00, 0x00, 0x00, 0x00, 0x47, 0xd3,
-  0xb5, 0x20, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x6c, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x49, 0xb3, 0x97, 0x20, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xed,
-  0x4e, 0x10, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0xb3, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x4c, 0xd6, 0x6a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x7c,
-  0x95, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x4c, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x4f, 0x5c, 0x77, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x50, 0x96,
-  0x2e, 0x90, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x59, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x52, 0x76, 0x10, 0x90, 0x00, 0x00, 0x00, 0x00, 0x53, 0x1c,
-  0x3b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xf2, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x54, 0xfc, 0x1d, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x56, 0x35,
-  0xd4, 0x90, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x3a, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x58, 0x1e, 0xf1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x58, 0xc5,
-  0x1c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xd3, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x5a, 0xa4, 0xfe, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde,
-  0xb5, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xe0, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x5d, 0xbe, 0x97, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x64,
-  0xc2, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x79, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x60, 0x4d, 0xde, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x61, 0x87,
-  0x95, 0x90, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0xc0, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x63, 0x67, 0x77, 0x90, 0x00, 0x00, 0x00, 0x00, 0x64, 0x0d,
-  0xa2, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x59, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x65, 0xed, 0x84, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x67, 0x27,
-  0x3b, 0x90, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x66, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x69, 0x07, 0x1d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x69, 0xad,
-  0x48, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xff, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x6b, 0x96, 0x65, 0x20, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xd0,
-  0x1c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x47, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x6e, 0xaf, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x56,
-  0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xe0, 0x10, 0x00, 0x00,
-  0x00, 0x00, 0x71, 0x36, 0x0b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x72, 0x6f,
-  0xc2, 0x10, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xed, 0x20, 0x00, 0x00,
-  0x00, 0x00, 0x74, 0x4f, 0xa4, 0x10, 0x00, 0x00, 0x00, 0x00, 0x74, 0xff,
-  0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0xc0, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x76, 0xde, 0xeb, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18,
-  0xa2, 0x90, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xcd, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x79, 0xf8, 0x84, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x9e,
-  0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x66, 0x90, 0x00, 0x00,
-  0x00, 0x00, 0x7c, 0x7e, 0x91, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7d, 0xb8,
-  0x48, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x73, 0xa0, 0x00, 0x00,
-  0x00, 0x00, 0x7f, 0x98, 0x2a, 0x90, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90, 0x01,
-  0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90, 0x01,
-  0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x50,
-  0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00, 0x50,
-  0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
-  0x01, 0x0a, 0x50, 0x53, 0x54, 0x38, 0x50, 0x44, 0x54, 0x2c, 0x4d, 0x33,
-  0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31, 0x2e, 0x31, 0x2e, 0x30,
-  0x0a
-};
-unsigned int America_Los_Angeles_len = 2845;
-unsigned char America_New_York[] = {
-  0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00,
-  0x9e, 0xa6, 0x1e, 0x70, 0x9f, 0xba, 0xeb, 0x60, 0xa0, 0x86, 0x00, 0x70,
-  0xa1, 0x9a, 0xcd, 0x60, 0xa2, 0x65, 0xe2, 0x70, 0xa3, 0x83, 0xe9, 0xe0,
-  0xa4, 0x6a, 0xae, 0x70, 0xa5, 0x35, 0xa7, 0x60, 0xa6, 0x53, 0xca, 0xf0,
-  0xa7, 0x15, 0x89, 0x60, 0xa8, 0x33, 0xac, 0xf0, 0xa8, 0xfe, 0xa5, 0xe0,
-  0xaa, 0x13, 0x8e, 0xf0, 0xaa, 0xde, 0x87, 0xe0, 0xab, 0xf3, 0x70, 0xf0,
-  0xac, 0xbe, 0x69, 0xe0, 0xad, 0xd3, 0x52, 0xf0, 0xae, 0x9e, 0x4b, 0xe0,
-  0xaf, 0xb3, 0x34, 0xf0, 0xb0, 0x7e, 0x2d, 0xe0, 0xb1, 0x9c, 0x51, 0x70,
-  0xb2, 0x67, 0x4a, 0x60, 0xb3, 0x7c, 0x33, 0x70, 0xb4, 0x47, 0x2c, 0x60,
-  0xb5, 0x5c, 0x15, 0x70, 0xb6, 0x27, 0x0e, 0x60, 0xb7, 0x3b, 0xf7, 0x70,
-  0xb8, 0x06, 0xf0, 0x60, 0xb9, 0x1b, 0xd9, 0x70, 0xb9, 0xe6, 0xd2, 0x60,
-  0xbb, 0x04, 0xf5, 0xf0, 0xbb, 0xc6, 0xb4, 0x60, 0xbc, 0xe4, 0xd7, 0xf0,
-  0xbd, 0xaf, 0xd0, 0xe0, 0xbe, 0xc4, 0xb9, 0xf0, 0xbf, 0x8f, 0xb2, 0xe0,
-  0xc0, 0xa4, 0x9b, 0xf0, 0xc1, 0x6f, 0x94, 0xe0, 0xc2, 0x84, 0x7d, 0xf0,
-  0xc3, 0x4f, 0x76, 0xe0, 0xc4, 0x64, 0x5f, 0xf0, 0xc5, 0x2f, 0x58, 0xe0,
-  0xc6, 0x4d, 0x7c, 0x70, 0xc7, 0x0f, 0x3a, 0xe0, 0xc8, 0x2d, 0x5e, 0x70,
-  0xc8, 0xf8, 0x57, 0x60, 0xca, 0x0d, 0x40, 0x70, 0xca, 0xd8, 0x39, 0x60,
-  0xcb, 0x88, 0xf0, 0x70, 0xd2, 0x23, 0xf4, 0x70, 0xd2, 0x60, 0xfb, 0xe0,
-  0xd3, 0x75, 0xe4, 0xf0, 0xd4, 0x40, 0xdd, 0xe0, 0xd5, 0x55, 0xc6, 0xf0,
-  0xd6, 0x20, 0xbf, 0xe0, 0xd7, 0x35, 0xa8, 0xf0, 0xd8, 0x00, 0xa1, 0xe0,
-  0xd9, 0x15, 0x8a, 0xf0, 0xd9, 0xe0, 0x83, 0xe0, 0xda, 0xfe, 0xa7, 0x70,
-  0xdb, 0xc0, 0x65, 0xe0, 0xdc, 0xde, 0x89, 0x70, 0xdd, 0xa9, 0x82, 0x60,
-  0xde, 0xbe, 0x6b, 0x70, 0xdf, 0x89, 0x64, 0x60, 0xe0, 0x9e, 0x4d, 0x70,
-  0xe1, 0x69, 0x46, 0x60, 0xe2, 0x7e, 0x2f, 0x70, 0xe3, 0x49, 0x28, 0x60,
-  0xe4, 0x5e, 0x11, 0x70, 0xe5, 0x57, 0x2e, 0xe0, 0xe6, 0x47, 0x2d, 0xf0,
-  0xe7, 0x37, 0x10, 0xe0, 0xe8, 0x27, 0x0f, 0xf0, 0xe9, 0x16, 0xf2, 0xe0,
-  0xea, 0x06, 0xf1, 0xf0, 0xea, 0xf6, 0xd4, 0xe0, 0xeb, 0xe6, 0xd3, 0xf0,
-  0xec, 0xd6, 0xb6, 0xe0, 0xed, 0xc6, 0xb5, 0xf0, 0xee, 0xbf, 0xd3, 0x60,
-  0xef, 0xaf, 0xd2, 0x70, 0xf0, 0x9f, 0xb5, 0x60, 0xf1, 0x8f, 0xb4, 0x70,
-  0xf2, 0x7f, 0x97, 0x60, 0xf3, 0x6f, 0x96, 0x70, 0xf4, 0x5f, 0x79, 0x60,
-  0xf5, 0x4f, 0x78, 0x70, 0xf6, 0x3f, 0x5b, 0x60, 0xf7, 0x2f, 0x5a, 0x70,
-  0xf8, 0x28, 0x77, 0xe0, 0xf9, 0x0f, 0x3c, 0x70, 0xfa, 0x08, 0x59, 0xe0,
-  0xfa, 0xf8, 0x58, 0xf0, 0xfb, 0xe8, 0x3b, 0xe0, 0xfc, 0xd8, 0x3a, 0xf0,
-  0xfd, 0xc8, 0x1d, 0xe0, 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xa7, 0xff, 0xe0,
-  0x00, 0x97, 0xfe, 0xf0, 0x01, 0x87, 0xe1, 0xe0, 0x02, 0x77, 0xe0, 0xf0,
-  0x03, 0x70, 0xfe, 0x60, 0x04, 0x60, 0xfd, 0x70, 0x05, 0x50, 0xe0, 0x60,
-  0x06, 0x40, 0xdf, 0x70, 0x07, 0x30, 0xc2, 0x60, 0x07, 0x8d, 0x19, 0x70,
-  0x09, 0x10, 0xa4, 0x60, 0x09, 0xad, 0x94, 0xf0, 0x0a, 0xf0, 0x86, 0x60,
-  0x0b, 0xe0, 0x85, 0x70, 0x0c, 0xd9, 0xa2, 0xe0, 0x0d, 0xc0, 0x67, 0x70,
-  0x0e, 0xb9, 0x84, 0xe0, 0x0f, 0xa9, 0x83, 0xf0, 0x10, 0x99, 0x66, 0xe0,
-  0x11, 0x89, 0x65, 0xf0, 0x12, 0x79, 0x48, 0xe0, 0x13, 0x69, 0x47, 0xf0,
-  0x14, 0x59, 0x2a, 0xe0, 0x15, 0x49, 0x29, 0xf0, 0x16, 0x39, 0x0c, 0xe0,
-  0x17, 0x29, 0x0b, 0xf0, 0x18, 0x22, 0x29, 0x60, 0x19, 0x08, 0xed, 0xf0,
-  0x1a, 0x02, 0x0b, 0x60, 0x1a, 0xf2, 0x0a, 0x70, 0x1b, 0xe1, 0xed, 0x60,
-  0x1c, 0xd1, 0xec, 0x70, 0x1d, 0xc1, 0xcf, 0x60, 0x1e, 0xb1, 0xce, 0x70,
-  0x1f, 0xa1, 0xb1, 0x60, 0x20, 0x76, 0x00, 0xf0, 0x21, 0x81, 0x93, 0x60,
-  0x22, 0x55, 0xe2, 0xf0, 0x23, 0x6a, 0xaf, 0xe0, 0x24, 0x35, 0xc4, 0xf0,
-  0x25, 0x4a, 0x91, 0xe0, 0x26, 0x15, 0xa6, 0xf0, 0x27, 0x2a, 0x73, 0xe0,
-  0x27, 0xfe, 0xc3, 0x70, 0x29, 0x0a, 0x55, 0xe0, 0x29, 0xde, 0xa5, 0x70,
-  0x2a, 0xea, 0x37, 0xe0, 0x2b, 0xbe, 0x87, 0x70, 0x2c, 0xd3, 0x54, 0x60,
-  0x2d, 0x9e, 0x69, 0x70, 0x2e, 0xb3, 0x36, 0x60, 0x2f, 0x7e, 0x4b, 0x70,
-  0x30, 0x93, 0x18, 0x60, 0x31, 0x67, 0x67, 0xf0, 0x32, 0x72, 0xfa, 0x60,
-  0x33, 0x47, 0x49, 0xf0, 0x34, 0x52, 0xdc, 0x60, 0x35, 0x27, 0x2b, 0xf0,
-  0x36, 0x32, 0xbe, 0x60, 0x37, 0x07, 0x0d, 0xf0, 0x38, 0x1b, 0xda, 0xe0,
-  0x38, 0xe6, 0xef, 0xf0, 0x39, 0xfb, 0xbc, 0xe0, 0x3a, 0xc6, 0xd1, 0xf0,
-  0x3b, 0xdb, 0x9e, 0xe0, 0x3c, 0xaf, 0xee, 0x70, 0x3d, 0xbb, 0x80, 0xe0,
-  0x3e, 0x8f, 0xd0, 0x70, 0x3f, 0x9b, 0x62, 0xe0, 0x40, 0x6f, 0xb2, 0x70,
-  0x41, 0x84, 0x7f, 0x60, 0x42, 0x4f, 0x94, 0x70, 0x43, 0x64, 0x61, 0x60,
-  0x44, 0x2f, 0x76, 0x70, 0x45, 0x44, 0x43, 0x60, 0x45, 0xf3, 0xa8, 0xf0,
-  0x47, 0x2d, 0x5f, 0xe0, 0x47, 0xd3, 0x8a, 0xf0, 0x49, 0x0d, 0x41, 0xe0,
-  0x49, 0xb3, 0x6c, 0xf0, 0x4a, 0xed, 0x23, 0xe0, 0x4b, 0x9c, 0x89, 0x70,
-  0x4c, 0xd6, 0x40, 0x60, 0x4d, 0x7c, 0x6b, 0x70, 0x4e, 0xb6, 0x22, 0x60,
-  0x4f, 0x5c, 0x4d, 0x70, 0x50, 0x96, 0x04, 0x60, 0x51, 0x3c, 0x2f, 0x70,
-  0x52, 0x75, 0xe6, 0x60, 0x53, 0x1c, 0x11, 0x70, 0x54, 0x55, 0xc8, 0x60,
-  0x54, 0xfb, 0xf3, 0x70, 0x56, 0x35, 0xaa, 0x60, 0x56, 0xe5, 0x0f, 0xf0,
-  0x58, 0x1e, 0xc6, 0xe0, 0x58, 0xc4, 0xf1, 0xf0, 0x59, 0xfe, 0xa8, 0xe0,
-  0x5a, 0xa4, 0xd3, 0xf0, 0x5b, 0xde, 0x8a, 0xe0, 0x5c, 0x84, 0xb5, 0xf0,
-  0x5d, 0xbe, 0x6c, 0xe0, 0x5e, 0x64, 0x97, 0xf0, 0x5f, 0x9e, 0x4e, 0xe0,
-  0x60, 0x4d, 0xb4, 0x70, 0x61, 0x87, 0x6b, 0x60, 0x62, 0x2d, 0x96, 0x70,
-  0x63, 0x67, 0x4d, 0x60, 0x64, 0x0d, 0x78, 0x70, 0x65, 0x47, 0x2f, 0x60,
-  0x65, 0xed, 0x5a, 0x70, 0x67, 0x27, 0x11, 0x60, 0x67, 0xcd, 0x3c, 0x70,
-  0x69, 0x06, 0xf3, 0x60, 0x69, 0xad, 0x1e, 0x70, 0x6a, 0xe6, 0xd5, 0x60,
-  0x6b, 0x96, 0x3a, 0xf0, 0x6c, 0xcf, 0xf1, 0xe0, 0x6d, 0x76, 0x1c, 0xf0,
-  0x6e, 0xaf, 0xd3, 0xe0, 0x6f, 0x55, 0xfe, 0xf0, 0x70, 0x8f, 0xb5, 0xe0,
-  0x71, 0x35, 0xe0, 0xf0, 0x72, 0x6f, 0x97, 0xe0, 0x73, 0x15, 0xc2, 0xf0,
-  0x74, 0x4f, 0x79, 0xe0, 0x74, 0xfe, 0xdf, 0x70, 0x76, 0x38, 0x96, 0x60,
-  0x76, 0xde, 0xc1, 0x70, 0x78, 0x18, 0x78, 0x60, 0x78, 0xbe, 0xa3, 0x70,
-  0x79, 0xf8, 0x5a, 0x60, 0x7a, 0x9e, 0x85, 0x70, 0x7b, 0xd8, 0x3c, 0x60,
-  0x7c, 0x7e, 0x67, 0x70, 0x7d, 0xb8, 0x1e, 0x60, 0x7e, 0x5e, 0x49, 0x70,
-  0x7f, 0x98, 0x00, 0x60, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x04,
-  0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x0c,
-  0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x45, 0x44,
-  0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45, 0x57, 0x54, 0x00, 0x45, 0x50,
-  0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
-  0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x03, 0xf0, 0x90,
-  0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x1e, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0x9f, 0xba, 0xeb, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86, 0x00, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xcd, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xa2, 0x65, 0xe2, 0x70, 0xff, 0xff, 0xff, 0xff, 0xa3, 0x83, 0xe9, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xa4, 0x6a, 0xae, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xa5, 0x35, 0xa7, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa6, 0x53, 0xca, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xa7, 0x15, 0x89, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xa8, 0x33, 0xac, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xa8, 0xfe, 0xa5, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xaa, 0x13, 0x8e, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xaa, 0xde, 0x87, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xab, 0xf3, 0x70, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xac, 0xbe, 0x69, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xad, 0xd3, 0x52, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xae, 0x9e, 0x4b, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xaf, 0xb3, 0x34, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xb0, 0x7e, 0x2d, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x9c, 0x51, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xb2, 0x67, 0x4a, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xb3, 0x7c, 0x33, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x47, 0x2c, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xb5, 0x5c, 0x15, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xb6, 0x27, 0x0e, 0x60, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x3b, 0xf7, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xb8, 0x06, 0xf0, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xb9, 0x1b, 0xd9, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb9, 0xe6, 0xd2, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xbb, 0x04, 0xf5, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xbb, 0xc6, 0xb4, 0x60, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xe4, 0xd7, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xbd, 0xaf, 0xd0, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xbe, 0xc4, 0xb9, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0xb2, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xc0, 0xa4, 0x9b, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xc1, 0x6f, 0x94, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x84, 0x7d, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xc3, 0x4f, 0x76, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xc4, 0x64, 0x5f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x2f, 0x58, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xc6, 0x4d, 0x7c, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xc7, 0x0f, 0x3a, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x2d, 0x5e, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xc8, 0xf8, 0x57, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xca, 0x0d, 0x40, 0x70, 0xff, 0xff, 0xff, 0xff, 0xca, 0xd8, 0x39, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xcb, 0x88, 0xf0, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xd2, 0x23, 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x60, 0xfb, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xd3, 0x75, 0xe4, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xd4, 0x40, 0xdd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x55, 0xc6, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xd6, 0x20, 0xbf, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xd7, 0x35, 0xa8, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x00, 0xa1, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xd9, 0x15, 0x8a, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xd9, 0xe0, 0x83, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xa7, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xdb, 0xc0, 0x65, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xdc, 0xde, 0x89, 0x70, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0x82, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xde, 0xbe, 0x6b, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xdf, 0x89, 0x64, 0x60, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x4d, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xe1, 0x69, 0x46, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xe2, 0x7e, 0x2f, 0x70, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x28, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xe4, 0x5e, 0x11, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xe5, 0x57, 0x2e, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x2d, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xe7, 0x37, 0x10, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xe8, 0x27, 0x0f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x16, 0xf2, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xea, 0x06, 0xf1, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xea, 0xf6, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xd3, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xec, 0xd6, 0xb6, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xed, 0xc6, 0xb5, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xee, 0xbf, 0xd3, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xef, 0xaf, 0xd2, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xf0, 0x9f, 0xb5, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xb4, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xf2, 0x7f, 0x97, 0x60, 0xff, 0xff, 0xff, 0xff,
-  0xf3, 0x6f, 0x96, 0x70, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0x79, 0x60,
-  0xff, 0xff, 0xff, 0xff, 0xf5, 0x4f, 0x78, 0x70, 0xff, 0xff, 0xff, 0xff,
-  0xf6, 0x3f, 0x5b, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x5a, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xf8, 0x28, 0x77, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xf9, 0x0f, 0x3c, 0x70, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x59, 0xe0,
-  0xff, 0xff, 0xff, 0xff, 0xfa, 0xf8, 0x58, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xfb, 0xe8, 0x3b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x3a, 0xf0,
-  0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0x1d, 0xe0, 0xff, 0xff, 0xff, 0xff,
-  0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0xff, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x01, 0x87, 0xe1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x02, 0x77, 0xe0, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x03, 0x70, 0xfe, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x04, 0x60, 0xfd, 0x70, 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0xe0, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x06, 0x40, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x07, 0x30, 0xc2, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x19, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x09, 0x10, 0xa4, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x09, 0xad, 0x94, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0x86, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x0b, 0xe0, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x0c, 0xd9, 0xa2, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x67, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x0e, 0xb9, 0x84, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x0f, 0xa9, 0x83, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x66, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x11, 0x89, 0x65, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x12, 0x79, 0x48, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x47, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x14, 0x59, 0x2a, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x15, 0x49, 0x29, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x0c, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x17, 0x29, 0x0b, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x18, 0x22, 0x29, 0x60, 0x00, 0x00, 0x00, 0x00, 0x19, 0x08, 0xed, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x1a, 0x02, 0x0b, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x1a, 0xf2, 0x0a, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0xed, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x1c, 0xd1, 0xec, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x1d, 0xc1, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xce, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x1f, 0xa1, 0xb1, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x20, 0x76, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0x93, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x22, 0x55, 0xe2, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x23, 0x6a, 0xaf, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xc4, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x25, 0x4a, 0x91, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x26, 0x15, 0xa6, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x73, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x27, 0xfe, 0xc3, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x29, 0x0a, 0x55, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xa5, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x2a, 0xea, 0x37, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x2b, 0xbe, 0x87, 0x70, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x54, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x2d, 0x9e, 0x69, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x2e, 0xb3, 0x36, 0x60, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x4b, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x30, 0x93, 0x18, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x31, 0x67, 0x67, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0xfa, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x33, 0x47, 0x49, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x34, 0x52, 0xdc, 0x60, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x2b, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x36, 0x32, 0xbe, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x37, 0x07, 0x0d, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0xda, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x38, 0xe6, 0xef, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x39, 0xfb, 0xbc, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xd1, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x3b, 0xdb, 0x9e, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x3c, 0xaf, 0xee, 0x70, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0x80, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x3e, 0x8f, 0xd0, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x3f, 0x9b, 0x62, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xb2, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x41, 0x84, 0x7f, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x42, 0x4f, 0x94, 0x70, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x61, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x44, 0x2f, 0x76, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x45, 0x44, 0x43, 0x60, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xa8, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x47, 0x2d, 0x5f, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x47, 0xd3, 0x8a, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x41, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x49, 0xb3, 0x6c, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x4a, 0xed, 0x23, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0x89, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x4c, 0xd6, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x4d, 0x7c, 0x6b, 0x70, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x22, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x4f, 0x5c, 0x4d, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x50, 0x96, 0x04, 0x60, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x2f, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x52, 0x75, 0xe6, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x53, 0x1c, 0x11, 0x70, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xc8, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x54, 0xfb, 0xf3, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x56, 0x35, 0xaa, 0x60, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x0f, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x58, 0x1e, 0xc6, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x58, 0xc4, 0xf1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xa8, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x5a, 0xa4, 0xd3, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x5b, 0xde, 0x8a, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xb5, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x5d, 0xbe, 0x6c, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x5e, 0x64, 0x97, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x4e, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x60, 0x4d, 0xb4, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x61, 0x87, 0x6b, 0x60, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0x96, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x63, 0x67, 0x4d, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x64, 0x0d, 0x78, 0x70, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x2f, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x65, 0xed, 0x5a, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x67, 0x27, 0x11, 0x60, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x3c, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x69, 0x06, 0xf3, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x69, 0xad, 0x1e, 0x70, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xd5, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x6b, 0x96, 0x3a, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x6c, 0xcf, 0xf1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x1c, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x6e, 0xaf, 0xd3, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x6f, 0x55, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xb5, 0xe0,
-  0x00, 0x00, 0x00, 0x00, 0x71, 0x35, 0xe0, 0xf0, 0x00, 0x00, 0x00, 0x00,
-  0x72, 0x6f, 0x97, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xc2, 0xf0,
-  0x00, 0x00, 0x00, 0x00, 0x74, 0x4f, 0x79, 0xe0, 0x00, 0x00, 0x00, 0x00,
-  0x74, 0xfe, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0x96, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x76, 0xde, 0xc1, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x78, 0x18, 0x78, 0x60, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xa3, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x79, 0xf8, 0x5a, 0x60, 0x00, 0x00, 0x00, 0x00,
-  0x7a, 0x9e, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x3c, 0x60,
-  0x00, 0x00, 0x00, 0x00, 0x7c, 0x7e, 0x67, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x7d, 0xb8, 0x1e, 0x60, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x49, 0x70,
-  0x00, 0x00, 0x00, 0x00, 0x7f, 0x98, 0x00, 0x60, 0x00, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff,
-  0xff, 0xc7, 0xc0, 0x01, 0x04, 0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff,
-  0xff, 0xc7, 0xc0, 0x01, 0x0c, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c,
-  0x4d, 0x54, 0x00, 0x45, 0x44, 0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45,
-  0x57, 0x54, 0x00, 0x45, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
-  0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x45, 0x53, 0x54, 0x35, 0x45, 0x44,
-  0x54, 0x2c, 0x4d, 0x33, 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31,
-  0x2e, 0x31, 0x2e, 0x30, 0x0a
-};
-unsigned int America_New_York_len = 3545;
-unsigned char Australia_Sydney[] = {
-  0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e, 0x80, 0x00, 0x00, 0x00,
-  0x9c, 0x4e, 0xa6, 0x9c, 0x9c, 0xbc, 0x20, 0xf0, 0xcb, 0x54, 0xb3, 0x00,
-  0xcb, 0xc7, 0x57, 0x70, 0xcc, 0xb7, 0x56, 0x80, 0xcd, 0xa7, 0x39, 0x70,
-  0xce, 0xa0, 0x73, 0x00, 0xcf, 0x87, 0x1b, 0x70, 0x03, 0x70, 0x39, 0x80,
-  0x04, 0x0d, 0x1c, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x05, 0xf6, 0x38, 0x80,
-  0x07, 0x2f, 0xfd, 0x80, 0x07, 0xd6, 0x1a, 0x80, 0x09, 0x0f, 0xdf, 0x80,
-  0x09, 0xb5, 0xfc, 0x80, 0x0a, 0xef, 0xc1, 0x80, 0x0b, 0x9f, 0x19, 0x00,
-  0x0c, 0xd8, 0xde, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x0e, 0xb8, 0xc0, 0x00,
-  0x0f, 0x5e, 0xdd, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x11, 0x3e, 0xbf, 0x00,
-  0x12, 0x78, 0x84, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x14, 0x58, 0x66, 0x00,
-  0x14, 0xfe, 0x83, 0x00, 0x16, 0x38, 0x48, 0x00, 0x17, 0x0c, 0x89, 0x80,
-  0x18, 0x21, 0x64, 0x80, 0x18, 0xc7, 0x81, 0x80, 0x1a, 0x01, 0x46, 0x80,
-  0x1a, 0xa7, 0x63, 0x80, 0x1b, 0xe1, 0x28, 0x80, 0x1c, 0x87, 0x45, 0x80,
-  0x1d, 0xc1, 0x0a, 0x80, 0x1e, 0x79, 0x9c, 0x80, 0x1f, 0x97, 0xb2, 0x00,
-  0x20, 0x59, 0x7e, 0x80, 0x21, 0x80, 0xce, 0x80, 0x22, 0x42, 0x9b, 0x00,
-  0x23, 0x69, 0xeb, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x25, 0x49, 0xcd, 0x00,
-  0x25, 0xef, 0xea, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x27, 0xcf, 0xcc, 0x00,
-  0x29, 0x09, 0x91, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x2a, 0xe9, 0x73, 0x00,
-  0x2b, 0x98, 0xca, 0x80, 0x2c, 0xd2, 0x8f, 0x80, 0x2d, 0x78, 0xac, 0x80,
-  0x2e, 0xb2, 0x71, 0x80, 0x2f, 0x58, 0x8e, 0x80, 0x30, 0x92, 0x53, 0x80,
-  0x31, 0x5d, 0x5a, 0x80, 0x32, 0x72, 0x35, 0x80, 0x33, 0x3d, 0x3c, 0x80,
-  0x34, 0x52, 0x17, 0x80, 0x35, 0x1d, 0x1e, 0x80, 0x36, 0x31, 0xf9, 0x80,
-  0x36, 0xfd, 0x00, 0x80, 0x38, 0x1b, 0x16, 0x00, 0x38, 0xdc, 0xe2, 0x80,
-  0x39, 0xa7, 0xe9, 0x80, 0x3a, 0xbc, 0xc4, 0x80, 0x3b, 0xda, 0xda, 0x00,
-  0x3c, 0xa5, 0xe1, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x3e, 0x85, 0xc3, 0x00,
-  0x3f, 0x9a, 0x9e, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x41, 0x83, 0xba, 0x80,
-  0x42, 0x45, 0x87, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x44, 0x2e, 0xa3, 0x80,
-  0x45, 0x43, 0x7e, 0x80, 0x46, 0x05, 0x4b, 0x00, 0x47, 0x23, 0x60, 0x80,
-  0x47, 0xf7, 0xa2, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x49, 0xd7, 0x84, 0x00,
-  0x4a, 0xc7, 0x75, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x4c, 0xa7, 0x57, 0x00,
-  0x4d, 0x97, 0x48, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x4f, 0x77, 0x2a, 0x00,
-  0x50, 0x70, 0x55, 0x80, 0x51, 0x60, 0x46, 0x80, 0x52, 0x50, 0x37, 0x80,
-  0x53, 0x40, 0x28, 0x80, 0x54, 0x30, 0x19, 0x80, 0x55, 0x20, 0x0a, 0x80,
-  0x56, 0x0f, 0xfb, 0x80, 0x56, 0xff, 0xec, 0x80, 0x57, 0xef, 0xdd, 0x80,
-  0x58, 0xdf, 0xce, 0x80, 0x59, 0xcf, 0xbf, 0x80, 0x5a, 0xbf, 0xb0, 0x80,
-  0x5b, 0xb8, 0xdc, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x5d, 0x98, 0xbe, 0x00,
-  0x5e, 0x88, 0xaf, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x60, 0x68, 0x91, 0x00,
-  0x61, 0x58, 0x82, 0x00, 0x62, 0x48, 0x73, 0x00, 0x63, 0x38, 0x64, 0x00,
-  0x64, 0x28, 0x55, 0x00, 0x65, 0x18, 0x46, 0x00, 0x66, 0x11, 0x71, 0x80,
-  0x67, 0x01, 0x62, 0x80, 0x67, 0xf1, 0x53, 0x80, 0x68, 0xe1, 0x44, 0x80,
-  0x69, 0xd1, 0x35, 0x80, 0x6a, 0xc1, 0x26, 0x80, 0x6b, 0xb1, 0x17, 0x80,
-  0x6c, 0xa1, 0x08, 0x80, 0x6d, 0x90, 0xf9, 0x80, 0x6e, 0x80, 0xea, 0x80,
-  0x6f, 0x70, 0xdb, 0x80, 0x70, 0x6a, 0x07, 0x00, 0x71, 0x59, 0xf8, 0x00,
-  0x72, 0x49, 0xe9, 0x00, 0x73, 0x39, 0xda, 0x00, 0x74, 0x29, 0xcb, 0x00,
-  0x75, 0x19, 0xbc, 0x00, 0x76, 0x09, 0xad, 0x00, 0x76, 0xf9, 0x9e, 0x00,
-  0x77, 0xe9, 0x8f, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x79, 0xc9, 0x71, 0x00,
-  0x7a, 0xb9, 0x62, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x7c, 0xa2, 0x7e, 0x80,
-  0x7d, 0x92, 0x6f, 0x80, 0x7e, 0x82, 0x60, 0x80, 0x7f, 0x72, 0x51, 0x80,
-  0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
-  0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x00, 0x00,
-  0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00,
-  0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00,
-  0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54, 0x00, 0x41, 0x45, 0x44, 0x54,
-  0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x8f, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e,
-  0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
-  0x73, 0x16, 0x7f, 0x3c, 0xff, 0xff, 0xff, 0xff, 0x9c, 0x4e, 0xa6, 0x9c,
-  0xff, 0xff, 0xff, 0xff, 0x9c, 0xbc, 0x20, 0xf0, 0xff, 0xff, 0xff, 0xff,
-  0xcb, 0x54, 0xb3, 0x00, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xc7, 0x57, 0x70,
-  0xff, 0xff, 0xff, 0xff, 0xcc, 0xb7, 0x56, 0x80, 0xff, 0xff, 0xff, 0xff,
-  0xcd, 0xa7, 0x39, 0x70, 0xff, 0xff, 0xff, 0xff, 0xce, 0xa0, 0x73, 0x00,
-  0xff, 0xff, 0xff, 0xff, 0xcf, 0x87, 0x1b, 0x70, 0x00, 0x00, 0x00, 0x00,
-  0x03, 0x70, 0x39, 0x80, 0x00, 0x00, 0x00, 0x00, 0x04, 0x0d, 0x1c, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x05, 0xf6, 0x38, 0x80, 0x00, 0x00, 0x00, 0x00, 0x07, 0x2f, 0xfd, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x07, 0xd6, 0x1a, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x09, 0x0f, 0xdf, 0x80, 0x00, 0x00, 0x00, 0x00, 0x09, 0xb5, 0xfc, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x0a, 0xef, 0xc1, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x0b, 0x9f, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd8, 0xde, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x0e, 0xb8, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x5e, 0xdd, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x11, 0x3e, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x78, 0x84, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x14, 0x58, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0xfe, 0x83, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x16, 0x38, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x17, 0x0c, 0x89, 0x80, 0x00, 0x00, 0x00, 0x00, 0x18, 0x21, 0x64, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x18, 0xc7, 0x81, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x1a, 0x01, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xa7, 0x63, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0x28, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x1c, 0x87, 0x45, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1, 0x0a, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x1e, 0x79, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x1f, 0x97, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x59, 0x7e, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x21, 0x80, 0xce, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x22, 0x42, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x69, 0xeb, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x25, 0x49, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xef, 0xea, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x27, 0xcf, 0xcc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x09, 0x91, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x2a, 0xe9, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x98, 0xca, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x2c, 0xd2, 0x8f, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x2d, 0x78, 0xac, 0x80, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb2, 0x71, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x2f, 0x58, 0x8e, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x30, 0x92, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00, 0x31, 0x5d, 0x5a, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0x35, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x33, 0x3d, 0x3c, 0x80, 0x00, 0x00, 0x00, 0x00, 0x34, 0x52, 0x17, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x35, 0x1d, 0x1e, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x36, 0x31, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x36, 0xfd, 0x00, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x38, 0xdc, 0xe2, 0x80, 0x00, 0x00, 0x00, 0x00, 0x39, 0xa7, 0xe9, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x3a, 0xbc, 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x3b, 0xda, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xa5, 0xe1, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x3e, 0x85, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9a, 0x9e, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x41, 0x83, 0xba, 0x80, 0x00, 0x00, 0x00, 0x00, 0x42, 0x45, 0x87, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x44, 0x2e, 0xa3, 0x80, 0x00, 0x00, 0x00, 0x00, 0x45, 0x43, 0x7e, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x46, 0x05, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x47, 0x23, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x47, 0xf7, 0xa2, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x49, 0xd7, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xc7, 0x75, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x4c, 0xa7, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x97, 0x48, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x4f, 0x77, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x70, 0x55, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x51, 0x60, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x52, 0x50, 0x37, 0x80, 0x00, 0x00, 0x00, 0x00, 0x53, 0x40, 0x28, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x54, 0x30, 0x19, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x55, 0x20, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x56, 0x0f, 0xfb, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x56, 0xff, 0xec, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x57, 0xef, 0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x58, 0xdf, 0xce, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x59, 0xcf, 0xbf, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x5a, 0xbf, 0xb0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xb8, 0xdc, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x5d, 0x98, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x88, 0xaf, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x60, 0x68, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x58, 0x82, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x62, 0x48, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x63, 0x38, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x28, 0x55, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x65, 0x18, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x66, 0x11, 0x71, 0x80, 0x00, 0x00, 0x00, 0x00, 0x67, 0x01, 0x62, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x67, 0xf1, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x68, 0xe1, 0x44, 0x80, 0x00, 0x00, 0x00, 0x00, 0x69, 0xd1, 0x35, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x6a, 0xc1, 0x26, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x6b, 0xb1, 0x17, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xa1, 0x08, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x6d, 0x90, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x6e, 0x80, 0xea, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x70, 0xdb, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x70, 0x6a, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x71, 0x59, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x49, 0xe9, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x73, 0x39, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x74, 0x29, 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x19, 0xbc, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x76, 0x09, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x76, 0xf9, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0xe9, 0x8f, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x79, 0xc9, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0xb9, 0x62, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x7c, 0xa2, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x92, 0x6f, 0x80,
-  0x00, 0x00, 0x00, 0x00, 0x7e, 0x82, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00,
-  0x7f, 0x72, 0x51, 0x80, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
-  0x01, 0x02, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
-  0x03, 0x04, 0x03, 0x00, 0x00, 0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a,
-  0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a,
-  0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54,
-  0x00, 0x41, 0x45, 0x44, 0x54, 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00,
-  0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x41, 0x45,
-  0x53, 0x54, 0x2d, 0x31, 0x30, 0x41, 0x45, 0x44, 0x54, 0x2c, 0x4d, 0x31,
-  0x30, 0x2e, 0x31, 0x2e, 0x30, 0x2c, 0x4d, 0x34, 0x2e, 0x31, 0x2e, 0x30,
-  0x2f, 0x33, 0x0a
-};
-unsigned int Australia_Sydney_len = 2223;
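The arrays removed above are raw TZif blobs: the leading bytes 0x54 0x5a 0x69 0x66 0x32 spell "TZif" followed by the version character '2'. A tiny standalone check of that magic, illustrative only (LooksLikeTzif is not an Abseil or zoneinfo API):

#include <cstddef>
#include <cstring>
#include <iostream>

// Returns true if the buffer starts with the TZif magic used by zoneinfo files.
bool LooksLikeTzif(const unsigned char* data, std::size_t len) {
  return len >= 5 && std::memcmp(data, "TZif", 4) == 0;
}

int main() {
  const unsigned char header[] = {0x54, 0x5a, 0x69, 0x66, 0x32};  // "TZif2"
  std::cout << (LooksLikeTzif(header, sizeof(header)) ? "TZif" : "not TZif")
            << ", version byte: " << header[4] << "\n";  // prints '2'
}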
diff --git a/abseil-cpp/absl/time/time.cc b/abseil-cpp/absl/time/time.cc
index 6bb36cb..d983c12 100644
--- a/abseil-cpp/absl/time/time.cc
+++ b/abseil-cpp/absl/time/time.cc
@@ -60,11 +60,13 @@
 inline int64_t FloorToUnit(absl::Duration d, absl::Duration unit) {
   absl::Duration rem;
   int64_t q = absl::IDivDuration(d, unit, &rem);
-  return (q > 0 ||
-          rem >= ZeroDuration() ||
-          q == std::numeric_limits<int64_t>::min()) ? q : q - 1;
+  return (q > 0 || rem >= ZeroDuration() ||
+          q == std::numeric_limits<int64_t>::min())
+             ? q
+             : q - 1;
 }
 
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
 inline absl::Time::Breakdown InfiniteFutureBreakdown() {
   absl::Time::Breakdown bd;
   bd.year = std::numeric_limits<int64_t>::max();
@@ -98,6 +100,7 @@
   bd.zone_abbr = "-00";
   return bd;
 }
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
 inline absl::TimeZone::CivilInfo InfiniteFutureCivilInfo() {
   TimeZone::CivilInfo ci;
@@ -119,6 +122,7 @@
   return ci;
 }
 
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
 inline absl::TimeConversion InfiniteFutureTimeConversion() {
   absl::TimeConversion tc;
   tc.pre = tc.trans = tc.post = absl::InfiniteFuture();
@@ -134,9 +138,10 @@
   tc.normalized = true;
   return tc;
 }
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
 // Makes a Time from sec, overflowing to InfiniteFuture/InfinitePast as
-// necessary. If sec is min/max, then consult cs+tz to check for overlow.
+// necessary. If sec is min/max, then consult cs+tz to check for overflow.
 Time MakeTimeWithOverflow(const cctz::time_point<cctz::seconds>& sec,
                           const cctz::civil_second& cs,
                           const cctz::time_zone& tz,
@@ -202,6 +207,7 @@
 // Time
 //
 
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
 absl::Time::Breakdown Time::In(absl::TimeZone tz) const {
   if (*this == absl::InfiniteFuture()) return InfiniteFutureBreakdown();
   if (*this == absl::InfinitePast()) return InfinitePastBreakdown();
@@ -226,6 +232,7 @@
   bd.zone_abbr = al.abbr;
   return bd;
 }
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
 //
 // Conversions from/to other time types.
@@ -296,7 +303,7 @@
   timespec ts;
   absl::Duration d = time_internal::ToUnixDuration(t);
   if (!time_internal::IsInfiniteDuration(d)) {
-    ts.tv_sec = time_internal::GetRepHi(d);
+    ts.tv_sec = static_cast<decltype(ts.tv_sec)>(time_internal::GetRepHi(d));
     if (ts.tv_sec == time_internal::GetRepHi(d)) {  // no time_t narrowing
       ts.tv_nsec = time_internal::GetRepLo(d) / 4;  // floor
       return ts;
@@ -315,7 +322,7 @@
 timeval ToTimeval(Time t) {
   timeval tv;
   timespec ts = absl::ToTimespec(t);
-  tv.tv_sec = ts.tv_sec;
+  tv.tv_sec = static_cast<decltype(tv.tv_sec)>(ts.tv_sec);
   if (tv.tv_sec != ts.tv_sec) {  // narrowing
     if (ts.tv_sec < 0) {
       tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
@@ -397,7 +404,7 @@
 //
 // Conversions involving time zones.
 //
-
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
 absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
                                      int min, int sec, TimeZone tz) {
   // Avoids years that are too extreme for CivilSecond to normalize.
@@ -429,6 +436,7 @@
   }
   return tc;
 }
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
 absl::Time FromTM(const struct tm& tm, absl::TimeZone tz) {
   civil_year_t tm_year = tm.tm_year;
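
The two ToTimespec()/ToTimeval() hunks above replace implicit assignments with explicit static_cast<decltype(...)> casts. The values are unchanged; the cast only silences implicit-conversion warnings, while the equality check on the next line still catches real narrowing on platforms where time_t or tv_sec is 32 bits wide. A minimal sketch of the same pattern, using made-up names (FakeTimespec, ToFakeTimespec) and a deliberately 32-bit field:

    #include <cstdint>
    #include <limits>

    struct FakeTimespec {
      int32_t tv_sec;   // stand-in for a platform with 32-bit time_t
      int32_t tv_nsec;
    };

    FakeTimespec ToFakeTimespec(int64_t seconds, int32_t nanos) {
      FakeTimespec ts;
      ts.tv_sec = static_cast<decltype(ts.tv_sec)>(seconds);  // explicit narrowing
      if (ts.tv_sec == seconds) {  // round trip succeeded: nothing was lost
        ts.tv_nsec = nanos;
        return ts;
      }
      // Out of range: saturate instead of silently truncating, loosely
      // mirroring how the real ToTimespec() handles its overflow cases.
      ts.tv_sec = seconds < 0 ? std::numeric_limits<decltype(ts.tv_sec)>::min()
                              : std::numeric_limits<decltype(ts.tv_sec)>::max();
      ts.tv_nsec = seconds < 0 ? 0 : 999999999;
      return ts;
    }
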
diff --git a/abseil-cpp/absl/time/time.h b/abseil-cpp/absl/time/time.h
index 37f6131..3758080 100644
--- a/abseil-cpp/absl/time/time.h
+++ b/abseil-cpp/absl/time/time.h
@@ -78,11 +78,13 @@
 #include <cmath>
 #include <cstdint>
 #include <ctime>
+#include <limits>
 #include <ostream>
 #include <string>
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
 #include "absl/base/macros.h"
 #include "absl/strings/string_view.h"
 #include "absl/time/civil_time.h"
@@ -97,19 +99,24 @@
 
 namespace time_internal {
 int64_t IDivDuration(bool satq, Duration num, Duration den, Duration* rem);
-constexpr Time FromUnixDuration(Duration d);
-constexpr Duration ToUnixDuration(Time t);
-constexpr int64_t GetRepHi(Duration d);
-constexpr uint32_t GetRepLo(Duration d);
-constexpr Duration MakeDuration(int64_t hi, uint32_t lo);
-constexpr Duration MakeDuration(int64_t hi, int64_t lo);
-inline Duration MakePosDoubleDuration(double n);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixDuration(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ToUnixDuration(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr uint32_t GetRepLo(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi,
+                                                              uint32_t lo);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi,
+                                                              int64_t lo);
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration MakePosDoubleDuration(double n);
 constexpr int64_t kTicksPerNanosecond = 4;
 constexpr int64_t kTicksPerSecond = 1000 * 1000 * 1000 * kTicksPerNanosecond;
 template <std::intmax_t N>
-constexpr Duration FromInt64(int64_t v, std::ratio<1, N>);
-constexpr Duration FromInt64(int64_t v, std::ratio<60>);
-constexpr Duration FromInt64(int64_t v, std::ratio<3600>);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v,
+                                                           std::ratio<1, N>);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v,
+                                                           std::ratio<60>);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v,
+                                                           std::ratio<3600>);
 template <typename T>
 using EnableIfIntegral = typename std::enable_if<
     std::is_integral<T>::value || std::is_enum<T>::value, int>::type;
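
Nearly every declaration in this header now carries ABSL_ATTRIBUTE_CONST_FUNCTION or ABSL_ATTRIBUTE_PURE_FUNCTION. Whatever those macros expand to on a given toolchain, the naming follows the usual const/pure function distinction: a const function's result depends only on its arguments, a pure function may additionally read (but not write) non-local state, and in both cases the optimizer may merge repeated calls. A hedged sketch of that distinction using the raw GCC/Clang attributes rather than the absl macros (Square, Scaled and kScale are illustrative names):

    // Compiles with GCC/Clang; the attributes are compiler extensions.
    long kScale = 3;

    // "const": the result depends only on the argument, so equal calls can be merged.
    __attribute__((const)) long Square(long x) { return x * x; }

    // "pure": may read globals such as kScale, but has no side effects.
    __attribute__((pure)) long Scaled(long x) { return x * kScale; }

    long SumOfSquares(long a) {
      // The optimizer is allowed to evaluate Square(a) once and reuse the result.
      return Square(a) + Square(a);
    }
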
@@ -120,7 +127,7 @@
 
 // Duration
 //
-// The `absl::Duration` class represents a signed, fixed-length span of time.
+// The `absl::Duration` class represents a signed, fixed-length amount of time.
 // A `Duration` is generated using a unit-specific factory function, or is
 // the result of subtracting one `absl::Time` from another. Durations behave
 // like unit-safe integers and they support all the natural integer-like
@@ -162,7 +169,7 @@
   constexpr Duration() : rep_hi_(0), rep_lo_(0) {}  // zero-length duration
 
   // Copyable.
-#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1910
+#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1930
   // Explicitly defining the constexpr copy constructor avoids an MSVC bug.
   constexpr Duration(const Duration& d)
       : rep_hi_(d.rep_hi_), rep_lo_(d.rep_lo_) {}
@@ -181,23 +188,39 @@
   Duration& operator%=(Duration rhs);
 
   // Overloads that forward to either the int64_t or double overloads above.
-  // Integer operands must be representable as int64_t.
-  template <typename T>
+  // Integer operands must be representable as int64_t. Integer division is
+  // truncating, so values less than the resolution will be returned as zero.
+  // Floating-point multiplication and division is rounding (halfway cases
+  // rounding away from zero), so values less than the resolution may be
+  // returned as either the resolution or zero.  In particular, `d / 2.0`
+  // can produce `d` when it is the resolution and "even".
+  template <typename T, time_internal::EnableIfIntegral<T> = 0>
   Duration& operator*=(T r) {
     int64_t x = r;
     return *this *= x;
   }
-  template <typename T>
+
+  template <typename T, time_internal::EnableIfIntegral<T> = 0>
   Duration& operator/=(T r) {
     int64_t x = r;
     return *this /= x;
   }
-  Duration& operator*=(float r) { return *this *= static_cast<double>(r); }
-  Duration& operator/=(float r) { return *this /= static_cast<double>(r); }
+
+  template <typename T, time_internal::EnableIfFloat<T> = 0>
+  Duration& operator*=(T r) {
+    double x = r;
+    return *this *= x;
+  }
+
+  template <typename T, time_internal::EnableIfFloat<T> = 0>
+  Duration& operator/=(T r) {
+    double x = r;
+    return *this /= x;
+  }
 
   template <typename H>
   friend H AbslHashValue(H h, Duration d) {
-    return H::combine(std::move(h), d.rep_hi_, d.rep_lo_);
+    return H::combine(std::move(h), d.rep_hi_.Get(), d.rep_lo_);
   }
 
  private:
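
The old single template overloads of operator*= and operator/= are split into integral and floating-point versions gated by time_internal::EnableIfIntegral and EnableIfFloat, which lets the truncation-versus-rounding contract in the comment attach to the right overload and removes the need for dedicated float overloads. A self-contained sketch of the same enable_if gating on a toy type (Ticks is illustrative, not absl's actual representation):

    #include <cstdint>
    #include <type_traits>

    template <typename T>
    using EnableIfIntegral = typename std::enable_if<
        std::is_integral<T>::value || std::is_enum<T>::value, int>::type;
    template <typename T>
    using EnableIfFloat =
        typename std::enable_if<std::is_floating_point<T>::value, int>::type;

    struct Ticks {
      int64_t n = 0;

      // Chosen for int, long, enums, ...: exact, truncating arithmetic.
      template <typename T, EnableIfIntegral<T> = 0>
      Ticks& operator*=(T r) {
        n *= static_cast<int64_t>(r);
        return *this;
      }

      // Chosen for float and double: routed through double, may round.
      template <typename T, EnableIfFloat<T> = 0>
      Ticks& operator*=(T r) {
        n = static_cast<int64_t>(static_cast<double>(n) * static_cast<double>(r));
        return *this;
      }
    };

    // t *= 2 selects the integral overload; t *= 2.5 selects the floating one.
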
@@ -206,42 +229,138 @@
   friend constexpr Duration time_internal::MakeDuration(int64_t hi,
                                                         uint32_t lo);
   constexpr Duration(int64_t hi, uint32_t lo) : rep_hi_(hi), rep_lo_(lo) {}
-  int64_t rep_hi_;
+
+  // We store `rep_hi_` 4-byte rather than 8-byte aligned to avoid 4 bytes of
+  // tail padding.
+  class HiRep {
+   public:
+    // Default constructor default-initializes `hi_`, which has the same
+    // semantics as default-initializing an `int64_t` (undetermined value).
+    HiRep() = default;
+
+    HiRep(const HiRep&) = default;
+    HiRep& operator=(const HiRep&) = default;
+
+    explicit constexpr HiRep(const int64_t value)
+        :  // C++17 forbids default-initialization in constexpr contexts. We can
+           // remove this in C++20.
+#if defined(ABSL_IS_BIG_ENDIAN) && ABSL_IS_BIG_ENDIAN
+          hi_(0),
+          lo_(0)
+#else
+          lo_(0),
+          hi_(0)
+#endif
+    {
+      *this = value;
+    }
+
+    constexpr int64_t Get() const {
+      const uint64_t unsigned_value =
+          (static_cast<uint64_t>(hi_) << 32) | static_cast<uint64_t>(lo_);
+      // `static_cast<int64_t>(unsigned_value)` is implementation-defined
+      // before c++20. On all supported platforms the behaviour is that mandated
+      // by c++20, i.e. "If the destination type is signed, [...] the result is
+      // the unique value of the destination type equal to the source value
+      // modulo 2^n, where n is the number of bits used to represent the
+      // destination type."
+      static_assert(
+          (static_cast<int64_t>((std::numeric_limits<uint64_t>::max)()) ==
+           int64_t{-1}) &&
+              (static_cast<int64_t>(static_cast<uint64_t>(
+                                        (std::numeric_limits<int64_t>::max)()) +
+                                    1) ==
+               (std::numeric_limits<int64_t>::min)()),
+          "static_cast<int64_t>(uint64_t) does not have c++20 semantics");
+      return static_cast<int64_t>(unsigned_value);
+    }
+
+    constexpr HiRep& operator=(const int64_t value) {
+      // "If the destination type is unsigned, the resulting value is the
+      // smallest unsigned value equal to the source value modulo 2^n
+      // where `n` is the number of bits used to represent the destination
+      // type".
+      const auto unsigned_value = static_cast<uint64_t>(value);
+      hi_ = static_cast<uint32_t>(unsigned_value >> 32);
+      lo_ = static_cast<uint32_t>(unsigned_value);
+      return *this;
+    }
+
+   private:
+    // Notes:
+    //  - Ideally we would use a `char[]` and `std::bitcast`, but the latter
+    //    does not exist (and is not constexpr in `absl`) before c++20.
+    //  - Order is optimized depending on endianness so that the compiler can
+    //    turn `Get()` (resp. `operator=()`) into a single 8-byte load (resp.
+    //    store).
+#if defined(ABSL_IS_BIG_ENDIAN) && ABSL_IS_BIG_ENDIAN
+    uint32_t hi_;
+    uint32_t lo_;
+#else
+    uint32_t lo_;
+    uint32_t hi_;
+#endif
+  };
+  HiRep rep_hi_;
   uint32_t rep_lo_;
 };
 
 // Relational Operators
-constexpr bool operator<(Duration lhs, Duration rhs);
-constexpr bool operator>(Duration lhs, Duration rhs) { return rhs < lhs; }
-constexpr bool operator>=(Duration lhs, Duration rhs) { return !(lhs < rhs); }
-constexpr bool operator<=(Duration lhs, Duration rhs) { return !(rhs < lhs); }
-constexpr bool operator==(Duration lhs, Duration rhs);
-constexpr bool operator!=(Duration lhs, Duration rhs) { return !(lhs == rhs); }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs,
+                                                       Duration rhs);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>(Duration lhs,
+                                                       Duration rhs) {
+  return rhs < lhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>=(Duration lhs,
+                                                        Duration rhs) {
+  return !(lhs < rhs);
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<=(Duration lhs,
+                                                        Duration rhs) {
+  return !(rhs < lhs);
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Duration lhs,
+                                                        Duration rhs);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator!=(Duration lhs,
+                                                        Duration rhs) {
+  return !(lhs == rhs);
+}
 
 // Additive Operators
-constexpr Duration operator-(Duration d);
-inline Duration operator+(Duration lhs, Duration rhs) { return lhs += rhs; }
-inline Duration operator-(Duration lhs, Duration rhs) { return lhs -= rhs; }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration operator-(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator+(Duration lhs,
+                                                        Duration rhs) {
+  return lhs += rhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator-(Duration lhs,
+                                                        Duration rhs) {
+  return lhs -= rhs;
+}
 
 // Multiplicative Operators
 // Integer operands must be representable as int64_t.
 template <typename T>
-Duration operator*(Duration lhs, T rhs) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(Duration lhs, T rhs) {
   return lhs *= rhs;
 }
 template <typename T>
-Duration operator*(T lhs, Duration rhs) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(T lhs, Duration rhs) {
   return rhs *= lhs;
 }
 template <typename T>
-Duration operator/(Duration lhs, T rhs) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator/(Duration lhs, T rhs) {
   return lhs /= rhs;
 }
-inline int64_t operator/(Duration lhs, Duration rhs) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t operator/(Duration lhs,
+                                                       Duration rhs) {
   return time_internal::IDivDuration(true, lhs, rhs,
                                      &lhs);  // trunc towards zero
 }
-inline Duration operator%(Duration lhs, Duration rhs) { return lhs %= rhs; }
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs,
+                                                        Duration rhs) {
+  return lhs %= rhs;
+}
 
 // IDivDuration()
 //
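
The hunk above replaces the plain int64_t rep_hi_ with the HiRep word pair. The change is purely about layout: an int64_t member forces 8-byte alignment, so int64_t plus uint32_t occupies 16 bytes with 4 bytes of tail padding, while two uint32_t halves plus rep_lo_ pack into 12; Get() and operator= rebuild the value through uint64_t, so numeric behavior is unchanged. A small sketch of both layouts and of the lossless round trip (sizes are what a typical LP64 ABI produces; the struct and function names are illustrative):

    #include <cstdint>

    struct Padded {    // old layout
      int64_t hi;
      uint32_t lo;
    };                 // commonly sizeof == 16, alignof == 8

    struct Packed {    // new layout: hi split into two 32-bit words
      uint32_t hi_lo;
      uint32_t hi_hi;
      uint32_t lo;
    };                 // commonly sizeof == 12, alignof == 4

    constexpr uint32_t HiWord(int64_t v) {
      return static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32);
    }
    constexpr uint32_t LoWord(int64_t v) {
      return static_cast<uint32_t>(static_cast<uint64_t>(v));
    }
    constexpr int64_t Rejoin(uint32_t hi, uint32_t lo) {
      // Relies on the modular uint64_t -> int64_t conversion that HiRep::Get()
      // static_asserts on the platforms absl supports.
      return static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) |
                                  static_cast<uint64_t>(lo));
    }

    static_assert(Rejoin(HiWord(-1), LoWord(-1)) == -1, "round trip");
    static_assert(Rejoin(HiWord(INT64_MIN), LoWord(INT64_MIN)) == INT64_MIN,
                  "round trip at the minimum");
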
@@ -288,18 +407,20 @@
 //
 //   double d = absl::FDivDuration(absl::Milliseconds(1500), absl::Seconds(1));
 //   // d == 1.5
-double FDivDuration(Duration num, Duration den);
+ABSL_ATTRIBUTE_CONST_FUNCTION double FDivDuration(Duration num, Duration den);
 
 // ZeroDuration()
 //
 // Returns a zero-length duration. This function behaves just like the default
 // constructor, but the name helps make the semantics clear at call sites.
-constexpr Duration ZeroDuration() { return Duration(); }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ZeroDuration() {
+  return Duration();
+}
 
 // AbsDuration()
 //
 // Returns the absolute value of a duration.
-inline Duration AbsDuration(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration AbsDuration(Duration d) {
   return (d < ZeroDuration()) ? -d : d;
 }
 
@@ -311,7 +432,7 @@
 //
 //   absl::Duration d = absl::Nanoseconds(123456789);
 //   absl::Duration a = absl::Trunc(d, absl::Microseconds(1));  // 123456us
-Duration Trunc(Duration d, Duration unit);
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Trunc(Duration d, Duration unit);
 
 // Floor()
 //
@@ -322,7 +443,7 @@
 //
 //   absl::Duration d = absl::Nanoseconds(123456789);
 //   absl::Duration b = absl::Floor(d, absl::Microseconds(1));  // 123456us
-Duration Floor(Duration d, Duration unit);
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Floor(Duration d, Duration unit);
 
 // Ceil()
 //
@@ -333,7 +454,7 @@
 //
 //   absl::Duration d = absl::Nanoseconds(123456789);
 //   absl::Duration c = absl::Ceil(d, absl::Microseconds(1));   // 123457us
-Duration Ceil(Duration d, Duration unit);
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Ceil(Duration d, Duration unit);
 
 // InfiniteDuration()
 //
@@ -369,7 +490,7 @@
 //
 // The examples involving the `/` operator above also apply to `IDivDuration()`
 // and `FDivDuration()`.
-constexpr Duration InfiniteDuration();
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration InfiniteDuration();
 
 // Nanoseconds()
 // Microseconds()
@@ -392,12 +513,30 @@
 //
 //   absl::Duration a = absl::Seconds(60);
 //   absl::Duration b = absl::Minutes(1);  // b == a
-constexpr Duration Nanoseconds(int64_t n);
-constexpr Duration Microseconds(int64_t n);
-constexpr Duration Milliseconds(int64_t n);
-constexpr Duration Seconds(int64_t n);
-constexpr Duration Minutes(int64_t n);
-constexpr Duration Hours(int64_t n);
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Nanoseconds(T n) {
+  return time_internal::FromInt64(n, std::nano{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Microseconds(T n) {
+  return time_internal::FromInt64(n, std::micro{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Milliseconds(T n) {
+  return time_internal::FromInt64(n, std::milli{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Seconds(T n) {
+  return time_internal::FromInt64(n, std::ratio<1>{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Minutes(T n) {
+  return time_internal::FromInt64(n, std::ratio<60>{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Hours(T n) {
+  return time_internal::FromInt64(n, std::ratio<3600>{});
+}
 
 // Factory overloads for constructing `Duration` values from a floating-point
 // number of the unit indicated by the factory function's name. These functions
@@ -409,19 +548,19 @@
 //   auto a = absl::Seconds(1.5);        // OK
 //   auto b = absl::Milliseconds(1500);  // BETTER
 template <typename T, time_internal::EnableIfFloat<T> = 0>
-Duration Nanoseconds(T n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Nanoseconds(T n) {
   return n * Nanoseconds(1);
 }
 template <typename T, time_internal::EnableIfFloat<T> = 0>
-Duration Microseconds(T n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Microseconds(T n) {
   return n * Microseconds(1);
 }
 template <typename T, time_internal::EnableIfFloat<T> = 0>
-Duration Milliseconds(T n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Milliseconds(T n) {
   return n * Milliseconds(1);
 }
 template <typename T, time_internal::EnableIfFloat<T> = 0>
-Duration Seconds(T n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Seconds(T n) {
   if (n >= 0) {  // Note: `NaN >= 0` is false.
     if (n >= static_cast<T>((std::numeric_limits<int64_t>::max)())) {
       return InfiniteDuration();
@@ -435,11 +574,11 @@
   }
 }
 template <typename T, time_internal::EnableIfFloat<T> = 0>
-Duration Minutes(T n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Minutes(T n) {
   return n * Minutes(1);
 }
 template <typename T, time_internal::EnableIfFloat<T> = 0>
-Duration Hours(T n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration Hours(T n) {
   return n * Hours(1);
 }
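
Together with the previous hunk, all of the unit factories are now templates: the integral overloads stay constexpr and exact, while the floating-point overloads shown here multiply by the unit and handle NaN and out-of-range inputs by saturating to +/-InfiniteDuration() (see Seconds(T) above). A short usage sketch, assuming the header is available; kPollInterval, kTimeout and FromConfig are illustrative names:

    #include "absl/time/time.h"

    // Integral arguments select the constexpr overloads, so these can be
    // compile-time constants.
    constexpr absl::Duration kPollInterval = absl::Milliseconds(250);
    constexpr absl::Duration kTimeout = absl::Seconds(30);

    absl::Duration FromConfig(double seconds) {
      // A double argument selects the floating-point overload; NaN and values
      // beyond int64_t range come back as +/-InfiniteDuration().
      return absl::Seconds(seconds);
    }
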
 
@@ -451,21 +590,22 @@
 // ToInt64Hours()
 //
 // Helper functions that convert a Duration to an integral count of the
-// indicated unit. These functions are shorthand for the `IDivDuration()`
-// function above; see its documentation for details about overflow, etc.
+// indicated unit. These return the same results as the `IDivDuration()`
+// function, though they usually do so more efficiently; see the
+// documentation of `IDivDuration()` for details about overflow, etc.
 //
 // Example:
 //
 //   absl::Duration d = absl::Milliseconds(1500);
 //   int64_t isec = absl::ToInt64Seconds(d);  // isec == 1
-int64_t ToInt64Nanoseconds(Duration d);
-int64_t ToInt64Microseconds(Duration d);
-int64_t ToInt64Milliseconds(Duration d);
-int64_t ToInt64Seconds(Duration d);
-int64_t ToInt64Minutes(Duration d);
-int64_t ToInt64Hours(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Nanoseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Microseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Seconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Minutes(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Hours(Duration d);
 
-// ToDoubleNanoSeconds()
+// ToDoubleNanoseconds()
 // ToDoubleMicroseconds()
 // ToDoubleMilliseconds()
 // ToDoubleSeconds()
@@ -480,12 +620,12 @@
 //
 //   absl::Duration d = absl::Milliseconds(1500);
 //   double dsec = absl::ToDoubleSeconds(d);  // dsec == 1.5
-double ToDoubleNanoseconds(Duration d);
-double ToDoubleMicroseconds(Duration d);
-double ToDoubleMilliseconds(Duration d);
-double ToDoubleSeconds(Duration d);
-double ToDoubleMinutes(Duration d);
-double ToDoubleHours(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleNanoseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleMicroseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleMilliseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleSeconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleMinutes(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleHours(Duration d);
 
 // FromChrono()
 //
@@ -495,12 +635,18 @@
 //
 //   std::chrono::milliseconds ms(123);
 //   absl::Duration d = absl::FromChrono(ms);
-constexpr Duration FromChrono(const std::chrono::nanoseconds& d);
-constexpr Duration FromChrono(const std::chrono::microseconds& d);
-constexpr Duration FromChrono(const std::chrono::milliseconds& d);
-constexpr Duration FromChrono(const std::chrono::seconds& d);
-constexpr Duration FromChrono(const std::chrono::minutes& d);
-constexpr Duration FromChrono(const std::chrono::hours& d);
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::nanoseconds& d);
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::microseconds& d);
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::milliseconds& d);
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::seconds& d);
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::minutes& d);
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::hours& d);
 
 // ToChronoNanoseconds()
 // ToChronoMicroseconds()
@@ -520,24 +666,33 @@
 //   auto y = absl::ToChronoNanoseconds(d);  // x == y
 //   auto z = absl::ToChronoSeconds(absl::InfiniteDuration());
 //   // z == std::chrono::seconds::max()
-std::chrono::nanoseconds ToChronoNanoseconds(Duration d);
-std::chrono::microseconds ToChronoMicroseconds(Duration d);
-std::chrono::milliseconds ToChronoMilliseconds(Duration d);
-std::chrono::seconds ToChronoSeconds(Duration d);
-std::chrono::minutes ToChronoMinutes(Duration d);
-std::chrono::hours ToChronoHours(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::nanoseconds ToChronoNanoseconds(
+    Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::microseconds ToChronoMicroseconds(
+    Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::milliseconds ToChronoMilliseconds(
+    Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::seconds ToChronoSeconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::minutes ToChronoMinutes(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::hours ToChronoHours(Duration d);
 
 // FormatDuration()
 //
 // Returns a string representing the duration in the form "72h3m0.5s".
 // Returns "inf" or "-inf" for +/- `InfiniteDuration()`.
-std::string FormatDuration(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::string FormatDuration(Duration d);
 
 // Output stream operator.
 inline std::ostream& operator<<(std::ostream& os, Duration d) {
   return os << FormatDuration(d);
 }
 
+// Support for StrFormat(), StrCat() etc.
+template <typename Sink>
+void AbslStringify(Sink& sink, Duration d) {
+  sink.Append(FormatDuration(d));
+}
+
 // ParseDuration()
 //
 // Parses a duration string consisting of a possibly signed sequence of
@@ -547,10 +702,20 @@
 // `ZeroDuration()`. Parses "inf" and "-inf" as +/- `InfiniteDuration()`.
 bool ParseDuration(absl::string_view dur_string, Duration* d);
 
-// Support for flag values of type Duration. Duration flags must be specified
-// in a format that is valid input for absl::ParseDuration().
+// AbslParseFlag()
+//
+// Parses a command-line flag string representation `text` into a Duration
+// value. Duration flags must be specified in a format that is valid input for
+// `absl::ParseDuration()`.
 bool AbslParseFlag(absl::string_view text, Duration* dst, std::string* error);
+
+
+// AbslUnparseFlag()
+//
+// Unparses a Duration value into a command-line string representation using
+// the format specified by `absl::ParseDuration()`.
 std::string AbslUnparseFlag(Duration d);
+
 ABSL_DEPRECATED("Use AbslParseFlag() instead.")
 bool ParseFlag(const std::string& text, Duration* dst, std::string* error);
 ABSL_DEPRECATED("Use AbslUnparseFlag() instead.")
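
The expanded comments above name the ADL hooks, AbslParseFlag() and AbslUnparseFlag(), that make absl::Duration usable directly as a command-line flag type. A hedged usage sketch, assuming the Abseil flags library is linked in; the flag name rpc_deadline and GetDeadline() are illustrative:

    #include "absl/flags/flag.h"
    #include "absl/time/time.h"

    // Parsed via absl::ParseDuration(), so --rpc_deadline=500ms, =2.5s or
    // =1h30m are all accepted on the command line.
    ABSL_FLAG(absl::Duration, rpc_deadline, absl::Seconds(5),
              "Deadline applied to each outgoing request.");

    absl::Duration GetDeadline() { return absl::GetFlag(FLAGS_rpc_deadline); }
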
@@ -637,9 +802,8 @@
   // `absl::TimeZone`.
   //
   // Deprecated. Use `absl::TimeZone::CivilInfo`.
-  struct
-      Breakdown {
-    int64_t year;          // year (e.g., 2013)
+  struct ABSL_DEPRECATED("Use `absl::TimeZone::CivilInfo`.") Breakdown {
+    int64_t year;        // year (e.g., 2013)
     int month;           // month of year [1:12]
     int day;             // day of month [1:31]
     int hour;            // hour of day [0:23]
@@ -664,7 +828,10 @@
   // Returns the breakdown of this instant in the given TimeZone.
   //
   // Deprecated. Use `absl::TimeZone::At(Time)`.
+  ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+  ABSL_DEPRECATED("Use `absl::TimeZone::At(Time)`.")
   Breakdown In(TimeZone tz) const;
+  ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
   template <typename H>
   friend H AbslHashValue(H h, Time t) {
@@ -685,48 +852,69 @@
 };
 
 // Relational Operators
-constexpr bool operator<(Time lhs, Time rhs) { return lhs.rep_ < rhs.rep_; }
-constexpr bool operator>(Time lhs, Time rhs) { return rhs < lhs; }
-constexpr bool operator>=(Time lhs, Time rhs) { return !(lhs < rhs); }
-constexpr bool operator<=(Time lhs, Time rhs) { return !(rhs < lhs); }
-constexpr bool operator==(Time lhs, Time rhs) { return lhs.rep_ == rhs.rep_; }
-constexpr bool operator!=(Time lhs, Time rhs) { return !(lhs == rhs); }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Time lhs, Time rhs) {
+  return lhs.rep_ < rhs.rep_;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>(Time lhs, Time rhs) {
+  return rhs < lhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>=(Time lhs, Time rhs) {
+  return !(lhs < rhs);
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<=(Time lhs, Time rhs) {
+  return !(rhs < lhs);
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Time lhs, Time rhs) {
+  return lhs.rep_ == rhs.rep_;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator!=(Time lhs, Time rhs) {
+  return !(lhs == rhs);
+}
 
 // Additive Operators
-inline Time operator+(Time lhs, Duration rhs) { return lhs += rhs; }
-inline Time operator+(Duration lhs, Time rhs) { return rhs += lhs; }
-inline Time operator-(Time lhs, Duration rhs) { return lhs -= rhs; }
-inline Duration operator-(Time lhs, Time rhs) { return lhs.rep_ - rhs.rep_; }
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Time operator+(Time lhs, Duration rhs) {
+  return lhs += rhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Time operator+(Duration lhs, Time rhs) {
+  return rhs += lhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Time operator-(Time lhs, Duration rhs) {
+  return lhs -= rhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator-(Time lhs, Time rhs) {
+  return lhs.rep_ - rhs.rep_;
+}
 
 // UnixEpoch()
 //
 // Returns the `absl::Time` representing "1970-01-01 00:00:00.0 +0000".
-constexpr Time UnixEpoch() { return Time(); }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time UnixEpoch() { return Time(); }
 
 // UniversalEpoch()
 //
 // Returns the `absl::Time` representing "0001-01-01 00:00:00.0 +0000", the
 // epoch of the ICU Universal Time Scale.
-constexpr Time UniversalEpoch() {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time UniversalEpoch() {
   // 719162 is the number of days from 0001-01-01 to 1970-01-01,
   // assuming the Gregorian calendar.
-  return Time(time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, 0U));
+  return Time(
+      time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, uint32_t{0}));
 }
 
 // InfiniteFuture()
 //
 // Returns an `absl::Time` that is infinitely far in the future.
-constexpr Time InfiniteFuture() {
-  return Time(
-      time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U));
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time InfiniteFuture() {
+  return Time(time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
+                                          ~uint32_t{0}));
 }
 
 // InfinitePast()
 //
 // Returns an `absl::Time` that is infinitely far in the past.
-constexpr Time InfinitePast() {
-  return Time(
-      time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U));
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time InfinitePast() {
+  return Time(time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(),
+                                          ~uint32_t{0}));
 }
 
 // FromUnixNanos()
@@ -737,14 +925,15 @@
 // FromUDate()
 // FromUniversal()
 //
-// Creates an `absl::Time` from a variety of other representations.
-constexpr Time FromUnixNanos(int64_t ns);
-constexpr Time FromUnixMicros(int64_t us);
-constexpr Time FromUnixMillis(int64_t ms);
-constexpr Time FromUnixSeconds(int64_t s);
-constexpr Time FromTimeT(time_t t);
-Time FromUDate(double udate);
-Time FromUniversal(int64_t universal);
+// Creates an `absl::Time` from a variety of other representations.  See
+// https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixNanos(int64_t ns);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMicros(int64_t us);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMillis(int64_t ms);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixSeconds(int64_t s);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromTimeT(time_t t);
+ABSL_ATTRIBUTE_CONST_FUNCTION Time FromUDate(double udate);
+ABSL_ATTRIBUTE_CONST_FUNCTION Time FromUniversal(int64_t universal);
 
 // ToUnixNanos()
 // ToUnixMicros()
@@ -754,17 +943,19 @@
 // ToUDate()
 // ToUniversal()
 //
-// Converts an `absl::Time` to a variety of other representations.  Note that
-// these operations round down toward negative infinity where necessary to
-// adjust to the resolution of the result type.  Beware of possible time_t
-// over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
-int64_t ToUnixNanos(Time t);
-int64_t ToUnixMicros(Time t);
-int64_t ToUnixMillis(Time t);
-int64_t ToUnixSeconds(Time t);
-time_t ToTimeT(Time t);
-double ToUDate(Time t);
-int64_t ToUniversal(Time t);
+// Converts an `absl::Time` to a variety of other representations.  See
+// https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html
+//
+// Note that these operations round down toward negative infinity where
+// necessary to adjust to the resolution of the result type.  Beware of
+// possible time_t over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixNanos(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMicros(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMillis(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixSeconds(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION time_t ToTimeT(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION double ToUDate(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUniversal(Time t);
 
 // DurationFromTimespec()
 // DurationFromTimeval()
@@ -780,14 +971,14 @@
 // and gettimeofday(2)), so conversion functions are provided for both cases.
 // The "to timespec/val" direction is easily handled via overloading, but
 // for "from timespec/val" the desired type is part of the function name.
-Duration DurationFromTimespec(timespec ts);
-Duration DurationFromTimeval(timeval tv);
-timespec ToTimespec(Duration d);
-timeval ToTimeval(Duration d);
-Time TimeFromTimespec(timespec ts);
-Time TimeFromTimeval(timeval tv);
-timespec ToTimespec(Time t);
-timeval ToTimeval(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration DurationFromTimespec(timespec ts);
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration DurationFromTimeval(timeval tv);
+ABSL_ATTRIBUTE_CONST_FUNCTION timespec ToTimespec(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION timeval ToTimeval(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION Time TimeFromTimespec(timespec ts);
+ABSL_ATTRIBUTE_CONST_FUNCTION Time TimeFromTimeval(timeval tv);
+ABSL_ATTRIBUTE_CONST_FUNCTION timespec ToTimespec(Time t);
+ABSL_ATTRIBUTE_CONST_FUNCTION timeval ToTimeval(Time t);
 
 // FromChrono()
 //
@@ -798,7 +989,8 @@
 //   auto tp = std::chrono::system_clock::from_time_t(123);
 //   absl::Time t = absl::FromChrono(tp);
 //   // t == absl::FromTimeT(123)
-Time FromChrono(const std::chrono::system_clock::time_point& tp);
+ABSL_ATTRIBUTE_PURE_FUNCTION Time
+FromChrono(const std::chrono::system_clock::time_point& tp);
 
 // ToChronoTime()
 //
@@ -811,10 +1003,15 @@
 //   absl::Time t = absl::FromTimeT(123);
 //   auto tp = absl::ToChronoTime(t);
 //   // tp == std::chrono::system_clock::from_time_t(123);
-std::chrono::system_clock::time_point ToChronoTime(Time);
+ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::system_clock::time_point
+    ToChronoTime(Time);
 
-// Support for flag values of type Time. Time flags must be specified in a
-// format that matches absl::RFC3339_full. For example:
+// AbslParseFlag()
+//
+// Parses the command-line flag string representation `text` into a Time value.
+// Time flags must be specified in a format that matches absl::RFC3339_full.
+//
+// For example:
 //
 //   --start_time=2016-01-02T03:04:05.678+08:00
 //
@@ -824,7 +1021,13 @@
 // seconds/milliseconds/etc from the Unix epoch, use an absl::Duration flag
 // and add that duration to absl::UnixEpoch() to get an absl::Time.
 bool AbslParseFlag(absl::string_view text, Time* t, std::string* error);
+
+// AbslUnparseFlag()
+//
+// Unparses a Time value into a command-line string representation using
+// the format specified by `absl::ParseTime()`.
 std::string AbslUnparseFlag(Time t);
+
 ABSL_DEPRECATED("Use AbslParseFlag() instead.")
 bool ParseFlag(const std::string& text, Time* t, std::string* error);
 ABSL_DEPRECATED("Use AbslUnparseFlag() instead.")
@@ -1073,22 +1276,25 @@
 //   absl::Time t = ...;
 //   absl::TimeZone tz = ...;
 //   const auto cd = absl::ToCivilDay(t, tz);
-inline CivilSecond ToCivilSecond(Time t, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilSecond ToCivilSecond(Time t,
+                                                              TimeZone tz) {
   return tz.At(t).cs;  // already a CivilSecond
 }
-inline CivilMinute ToCivilMinute(Time t, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilMinute ToCivilMinute(Time t,
+                                                              TimeZone tz) {
   return CivilMinute(tz.At(t).cs);
 }
-inline CivilHour ToCivilHour(Time t, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilHour ToCivilHour(Time t, TimeZone tz) {
   return CivilHour(tz.At(t).cs);
 }
-inline CivilDay ToCivilDay(Time t, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilDay ToCivilDay(Time t, TimeZone tz) {
   return CivilDay(tz.At(t).cs);
 }
-inline CivilMonth ToCivilMonth(Time t, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilMonth ToCivilMonth(Time t,
+                                                            TimeZone tz) {
   return CivilMonth(tz.At(t).cs);
 }
-inline CivilYear ToCivilYear(Time t, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilYear ToCivilYear(Time t, TimeZone tz) {
   return CivilYear(tz.At(t).cs);
 }
 
@@ -1104,7 +1310,8 @@
 // being when two non-existent civil times map to the same transition time.
 //
 // Note: Accepts civil times of any alignment.
-inline Time FromCivil(CivilSecond ct, TimeZone tz) {
+ABSL_ATTRIBUTE_PURE_FUNCTION inline Time FromCivil(CivilSecond ct,
+                                                   TimeZone tz) {
   const auto ti = tz.At(ct);
   if (ti.kind == TimeZone::TimeInfo::SKIPPED) return ti.trans;
   return ti.pre;
@@ -1118,8 +1325,7 @@
 // `absl::ConvertDateTime()`. Legacy version of `absl::TimeZone::TimeInfo`.
 //
 // Deprecated. Use `absl::TimeZone::TimeInfo`.
-struct
-    TimeConversion {
+struct ABSL_DEPRECATED("Use `absl::TimeZone::TimeInfo`.") TimeConversion {
   Time pre;    // time calculated using the pre-transition offset
   Time trans;  // when the civil-time discontinuity occurred
   Time post;   // time calculated using the post-transition offset
@@ -1153,8 +1359,11 @@
 //   // absl::ToCivilDay(tc.pre, tz).day() == 1
 //
 // Deprecated. Use `absl::TimeZone::At(CivilSecond)`.
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+ABSL_DEPRECATED("Use `absl::TimeZone::At(CivilSecond)`.")
 TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
                                int min, int sec, TimeZone tz);
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 
 // FromDateTime()
 //
@@ -1171,27 +1380,34 @@
 // Deprecated. Use `absl::FromCivil(CivilSecond, TimeZone)`. Note that the
 // behavior of `FromCivil()` differs from `FromDateTime()` for skipped civil
 // times. If you care about that see `absl::TimeZone::At(absl::CivilSecond)`.
-inline Time FromDateTime(int64_t year, int mon, int day, int hour,
-                         int min, int sec, TimeZone tz) {
+ABSL_DEPRECATED("Use `absl::FromCivil(CivilSecond, TimeZone)`.")
+inline Time FromDateTime(int64_t year, int mon, int day, int hour, int min,
+                         int sec, TimeZone tz) {
+  ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
   return ConvertDateTime(year, mon, day, hour, min, sec, tz).pre;
+  ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 }
 
 // FromTM()
 //
 // Converts the `tm_year`, `tm_mon`, `tm_mday`, `tm_hour`, `tm_min`, and
 // `tm_sec` fields to an `absl::Time` using the given time zone. See ctime(3)
-// for a description of the expected values of the tm fields. If the indicated
-// time instant is not unique (see `absl::TimeZone::At(absl::CivilSecond)`
-// above), the `tm_isdst` field is consulted to select the desired instant
-// (`tm_isdst` > 0 means DST, `tm_isdst` == 0 means no DST, `tm_isdst` < 0
-// means use the post-transition offset).
-Time FromTM(const struct tm& tm, TimeZone tz);
+// for a description of the expected values of the tm fields. If the civil time
+// is unique (see `absl::TimeZone::At(absl::CivilSecond)` above), the matching
+// time instant is returned.  Otherwise, the `tm_isdst` field is consulted to
+// choose between the possible results.  For a repeated civil time, `tm_isdst !=
+// 0` returns the matching DST instant, while `tm_isdst == 0` returns the
+// matching non-DST instant.  For a skipped civil time there is no matching
+// instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0`
+// returns the non-DST instant, that would have matched if the transition never
+// happened.
+ABSL_ATTRIBUTE_PURE_FUNCTION Time FromTM(const struct tm& tm, TimeZone tz);
 
 // ToTM()
 //
 // Converts the given `absl::Time` to a struct tm using the given time zone.
 // See ctime(3) for a description of the values of the tm fields.
-struct tm ToTM(Time t, TimeZone tz);
+ABSL_ATTRIBUTE_PURE_FUNCTION struct tm ToTM(Time t, TimeZone tz);
 
 // RFC3339_full
 // RFC3339_sec
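
The rewritten FromTM() comment in the hunk above explains how tm_isdst disambiguates repeated and skipped civil times. As a concrete, hypothetical illustration, assuming tzdata with "America/New_York" is loadable at run time: during the 2021 fall-back transition the civil time 01:30:00 on November 7 occurs twice, and tm_isdst picks which instant FromTM() returns (RepeatedCivilTimeGap is a made-up helper):

    #include <ctime>
    #include "absl/time/time.h"

    absl::Duration RepeatedCivilTimeGap() {
      absl::TimeZone nyc;
      if (!absl::LoadTimeZone("America/New_York", &nyc)) {
        return absl::ZeroDuration();  // tzdata unavailable
      }
      std::tm tm = {};
      tm.tm_year = 2021 - 1900;  // years since 1900
      tm.tm_mon = 10;            // November (tm_mon is 0-based)
      tm.tm_mday = 7;
      tm.tm_hour = 1;
      tm.tm_min = 30;

      tm.tm_isdst = 1;
      const absl::Time first = absl::FromTM(tm, nyc);   // 01:30 EDT, earlier
      tm.tm_isdst = 0;
      const absl::Time second = absl::FromTM(tm, nyc);  // 01:30 EST, later
      return second - first;                            // one hour
    }
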
@@ -1250,19 +1466,26 @@
 // `absl::InfinitePast()`, the returned string will be exactly "infinite-past".
 // In both cases the given format string and `absl::TimeZone` are ignored.
 //
-std::string FormatTime(absl::string_view format, Time t, TimeZone tz);
+ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(absl::string_view format,
+                                                    Time t, TimeZone tz);
 
 // Convenience functions that format the given time using the RFC3339_full
 // format.  The first overload uses the provided TimeZone, while the second
 // uses LocalTimeZone().
-std::string FormatTime(Time t, TimeZone tz);
-std::string FormatTime(Time t);
+ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t, TimeZone tz);
+ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t);
 
 // Output stream operator.
 inline std::ostream& operator<<(std::ostream& os, Time t) {
   return os << FormatTime(t);
 }
 
+// Support for StrFormat(), StrCat() etc.
+template <typename Sink>
+void AbslStringify(Sink& sink, Time t) {
+  sink.Append(FormatTime(t));
+}
+
 // ParseTime()
 //
 // Parses an input string according to the provided format string and
@@ -1334,21 +1557,23 @@
 // Creates a Duration with a given representation.
 // REQUIRES: hi,lo is a valid representation of a Duration as specified
 // in time/duration.cc.
-constexpr Duration MakeDuration(int64_t hi, uint32_t lo = 0) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi,
+                                                              uint32_t lo = 0) {
   return Duration(hi, lo);
 }
 
-constexpr Duration MakeDuration(int64_t hi, int64_t lo) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi,
+                                                              int64_t lo) {
   return MakeDuration(hi, static_cast<uint32_t>(lo));
 }
 
 // Make a Duration value from a floating-point number, as long as that number
 // is in the range [ 0 .. numeric_limits<int64_t>::max ), that is, as long as
 // it's positive and can be converted to int64_t without risk of UB.
-inline Duration MakePosDoubleDuration(double n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration MakePosDoubleDuration(double n) {
   const int64_t int_secs = static_cast<int64_t>(n);
   const uint32_t ticks = static_cast<uint32_t>(
-      (n - static_cast<double>(int_secs)) * kTicksPerSecond + 0.5);
+      std::round((n - static_cast<double>(int_secs)) * kTicksPerSecond));
   return ticks < kTicksPerSecond
              ? MakeDuration(int_secs, ticks)
              : MakeDuration(int_secs + 1, ticks - kTicksPerSecond);
@@ -1358,28 +1583,37 @@
 // pair. sec may be positive or negative.  ticks must be in the range
 // -kTicksPerSecond < *ticks < kTicksPerSecond.  If ticks is negative it
 // will be normalized to a positive value in the resulting Duration.
-constexpr Duration MakeNormalizedDuration(int64_t sec, int64_t ticks) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeNormalizedDuration(
+    int64_t sec, int64_t ticks) {
   return (ticks < 0) ? MakeDuration(sec - 1, ticks + kTicksPerSecond)
                      : MakeDuration(sec, ticks);
 }
 
 // Provide access to the Duration representation.
-constexpr int64_t GetRepHi(Duration d) { return d.rep_hi_; }
-constexpr uint32_t GetRepLo(Duration d) { return d.rep_lo_; }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d) {
+  return d.rep_hi_.Get();
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr uint32_t GetRepLo(Duration d) {
+  return d.rep_lo_;
+}
 
 // Returns true iff d is positive or negative infinity.
-constexpr bool IsInfiniteDuration(Duration d) { return GetRepLo(d) == ~0U; }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool IsInfiniteDuration(Duration d) {
+  return GetRepLo(d) == ~uint32_t{0};
+}
 
 // Returns an infinite Duration with the opposite sign.
 // REQUIRES: IsInfiniteDuration(d)
-constexpr Duration OppositeInfinity(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration OppositeInfinity(Duration d) {
   return GetRepHi(d) < 0
-             ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U)
-             : MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U);
+             ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~uint32_t{0})
+             : MakeDuration((std::numeric_limits<int64_t>::min)(),
+                            ~uint32_t{0});
 }
 
 // Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
-constexpr int64_t NegateAndSubtractOne(int64_t n) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t NegateAndSubtractOne(
+    int64_t n) {
   // Note: Good compilers will optimize this expression to ~n when using
   // a two's-complement representation (which is required for int64_t).
   return (n < 0) ? -(n + 1) : (-n) - 1;
@@ -1389,23 +1623,30 @@
 // functions depend on the above mentioned choice of the Unix epoch for the
 // Time representation (and both need to be Time friends).  Without this
 // knowledge, we would need to add-in/subtract-out UnixEpoch() respectively.
-constexpr Time FromUnixDuration(Duration d) { return Time(d); }
-constexpr Duration ToUnixDuration(Time t) { return t.rep_; }
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixDuration(Duration d) {
+  return Time(d);
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ToUnixDuration(Time t) {
+  return t.rep_;
+}
 
 template <std::intmax_t N>
-constexpr Duration FromInt64(int64_t v, std::ratio<1, N>) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v,
+                                                           std::ratio<1, N>) {
   static_assert(0 < N && N <= 1000 * 1000 * 1000, "Unsupported ratio");
   // Subsecond ratios cannot overflow.
   return MakeNormalizedDuration(
       v / N, v % N * kTicksPerNanosecond * 1000 * 1000 * 1000 / N);
 }
-constexpr Duration FromInt64(int64_t v, std::ratio<60>) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v,
+                                                           std::ratio<60>) {
   return (v <= (std::numeric_limits<int64_t>::max)() / 60 &&
           v >= (std::numeric_limits<int64_t>::min)() / 60)
              ? MakeDuration(v * 60)
              : v > 0 ? InfiniteDuration() : -InfiniteDuration();
 }
-constexpr Duration FromInt64(int64_t v, std::ratio<3600>) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v,
+                                                           std::ratio<3600>) {
   return (v <= (std::numeric_limits<int64_t>::max)() / 3600 &&
           v >= (std::numeric_limits<int64_t>::min)() / 3600)
              ? MakeDuration(v * 3600)
@@ -1425,40 +1666,44 @@
 
 // Converts a std::chrono::duration to an absl::Duration.
 template <typename Rep, typename Period>
-constexpr Duration FromChrono(const std::chrono::duration<Rep, Period>& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::duration<Rep, Period>& d) {
   static_assert(IsValidRep64<Rep>(0), "duration::rep is invalid");
   return FromInt64(int64_t{d.count()}, Period{});
 }
 
 template <typename Ratio>
-int64_t ToInt64(Duration d, Ratio) {
+ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64(Duration d, Ratio) {
   // Note: This may be used on MSVC, which may have a system_clock period of
   // std::ratio<1, 10 * 1000 * 1000>
   return ToInt64Seconds(d * Ratio::den / Ratio::num);
 }
 // Fastpath implementations for the 6 common duration units.
-inline int64_t ToInt64(Duration d, std::nano) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::nano) {
   return ToInt64Nanoseconds(d);
 }
-inline int64_t ToInt64(Duration d, std::micro) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::micro) {
   return ToInt64Microseconds(d);
 }
-inline int64_t ToInt64(Duration d, std::milli) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::milli) {
   return ToInt64Milliseconds(d);
 }
-inline int64_t ToInt64(Duration d, std::ratio<1>) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d,
+                                                     std::ratio<1>) {
   return ToInt64Seconds(d);
 }
-inline int64_t ToInt64(Duration d, std::ratio<60>) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d,
+                                                     std::ratio<60>) {
   return ToInt64Minutes(d);
 }
-inline int64_t ToInt64(Duration d, std::ratio<3600>) {
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d,
+                                                     std::ratio<3600>) {
   return ToInt64Hours(d);
 }
 
 // Converts an absl::Duration to a chrono duration of type T.
 template <typename T>
-T ToChronoDuration(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION T ToChronoDuration(Duration d) {
   using Rep = typename T::rep;
   using Period = typename T::period;
   static_assert(IsValidRep64<Rep>(0), "duration::rep is invalid");
@@ -1472,42 +1717,23 @@
 
 }  // namespace time_internal
 
-constexpr Duration Nanoseconds(int64_t n) {
-  return time_internal::FromInt64(n, std::nano{});
-}
-constexpr Duration Microseconds(int64_t n) {
-  return time_internal::FromInt64(n, std::micro{});
-}
-constexpr Duration Milliseconds(int64_t n) {
-  return time_internal::FromInt64(n, std::milli{});
-}
-constexpr Duration Seconds(int64_t n) {
-  return time_internal::FromInt64(n, std::ratio<1>{});
-}
-constexpr Duration Minutes(int64_t n) {
-  return time_internal::FromInt64(n, std::ratio<60>{});
-}
-constexpr Duration Hours(int64_t n) {
-  return time_internal::FromInt64(n, std::ratio<3600>{});
-}
-
-constexpr bool operator<(Duration lhs, Duration rhs) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs,
+                                                       Duration rhs) {
   return time_internal::GetRepHi(lhs) != time_internal::GetRepHi(rhs)
              ? time_internal::GetRepHi(lhs) < time_internal::GetRepHi(rhs)
-             : time_internal::GetRepHi(lhs) ==
-                       (std::numeric_limits<int64_t>::min)()
-                   ? time_internal::GetRepLo(lhs) + 1 <
-                         time_internal::GetRepLo(rhs) + 1
-                   : time_internal::GetRepLo(lhs) <
-                         time_internal::GetRepLo(rhs);
+         : time_internal::GetRepHi(lhs) == (std::numeric_limits<int64_t>::min)()
+             ? time_internal::GetRepLo(lhs) + 1 <
+                   time_internal::GetRepLo(rhs) + 1
+             : time_internal::GetRepLo(lhs) < time_internal::GetRepLo(rhs);
 }
 
-constexpr bool operator==(Duration lhs, Duration rhs) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Duration lhs,
+                                                        Duration rhs) {
   return time_internal::GetRepHi(lhs) == time_internal::GetRepHi(rhs) &&
          time_internal::GetRepLo(lhs) == time_internal::GetRepLo(rhs);
 }
 
-constexpr Duration operator-(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration operator-(Duration d) {
   // This is a little interesting because of the special cases.
   //
   // If rep_lo_ is zero, we have it easy; it's safe to negate rep_hi_, we're
@@ -1533,47 +1759,53 @@
                              time_internal::GetRepLo(d));
 }
 
-constexpr Duration InfiniteDuration() {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration InfiniteDuration() {
   return time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
-                                     ~0U);
+                                     ~uint32_t{0});
 }
 
-constexpr Duration FromChrono(const std::chrono::nanoseconds& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::nanoseconds& d) {
   return time_internal::FromChrono(d);
 }
-constexpr Duration FromChrono(const std::chrono::microseconds& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::microseconds& d) {
   return time_internal::FromChrono(d);
 }
-constexpr Duration FromChrono(const std::chrono::milliseconds& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::milliseconds& d) {
   return time_internal::FromChrono(d);
 }
-constexpr Duration FromChrono(const std::chrono::seconds& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::seconds& d) {
   return time_internal::FromChrono(d);
 }
-constexpr Duration FromChrono(const std::chrono::minutes& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::minutes& d) {
   return time_internal::FromChrono(d);
 }
-constexpr Duration FromChrono(const std::chrono::hours& d) {
+ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono(
+    const std::chrono::hours& d) {
   return time_internal::FromChrono(d);
 }
 
-constexpr Time FromUnixNanos(int64_t ns) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixNanos(int64_t ns) {
   return time_internal::FromUnixDuration(Nanoseconds(ns));
 }
 
-constexpr Time FromUnixMicros(int64_t us) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMicros(int64_t us) {
   return time_internal::FromUnixDuration(Microseconds(us));
 }
 
-constexpr Time FromUnixMillis(int64_t ms) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMillis(int64_t ms) {
   return time_internal::FromUnixDuration(Milliseconds(ms));
 }
 
-constexpr Time FromUnixSeconds(int64_t s) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixSeconds(int64_t s) {
   return time_internal::FromUnixDuration(Seconds(s));
 }
 
-constexpr Time FromTimeT(time_t t) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromTimeT(time_t t) {
   return time_internal::FromUnixDuration(Seconds(t));
 }
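
The reindented Duration operator< a few hunks above keeps its special case for rep_hi_ == int64_t min. One way to read the unchanged expression (this is an interpretation, not something the diff alters): comparing lo + 1 on both sides uses unsigned wraparound so that the -InfiniteDuration() sentinel, whose low word is all ones, sorts below every finite duration that shares the minimal high word. The arithmetic checks out in isolation:

    #include <cstdint>

    constexpr uint32_t kInfLo = ~uint32_t{0};    // low word of -InfiniteDuration()
    constexpr uint32_t kFiniteLo = 3999999999u;  // largest finite low word
                                                 // (kTicksPerSecond - 1)

    // Compared directly, the all-ones sentinel would sort *after* finite values.
    static_assert(!(kInfLo < kFiniteLo), "raw comparison orders the sentinel last");

    // With +1 the sentinel wraps to 0 and correctly sorts first.
    static_assert(static_cast<uint32_t>(kInfLo + 1) <
                      static_cast<uint32_t>(kFiniteLo + 1),
                  "wrapped comparison orders the sentinel first");
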
 
diff --git a/abseil-cpp/absl/time/time_benchmark.cc b/abseil-cpp/absl/time/time_benchmark.cc
index 99e6279..93a7c41 100644
--- a/abseil-cpp/absl/time/time_benchmark.cc
+++ b/abseil-cpp/absl/time/time_benchmark.cc
@@ -185,9 +185,11 @@
   int i = 0;
   while (state.KeepRunning()) {
     if ((i & 1) == 0) {
-      absl::FromCivil(absl::CivilSecond(2014, 12, 18, 20, 16, 18), tz);
+      benchmark::DoNotOptimize(
+          absl::FromCivil(absl::CivilSecond(2014, 12, 18, 20, 16, 18), tz));
     } else {
-      absl::FromCivil(absl::CivilSecond(2013, 11, 15, 18, 30, 27), tz);
+      benchmark::DoNotOptimize(
+          absl::FromCivil(absl::CivilSecond(2013, 11, 15, 18, 30, 27), tz));
     }
     ++i;
   }
@@ -224,7 +226,8 @@
 void BM_Time_FromCivilUTC_Absl(benchmark::State& state) {
   const absl::TimeZone tz = absl::UTCTimeZone();
   while (state.KeepRunning()) {
-    absl::FromCivil(absl::CivilSecond(2014, 12, 18, 20, 16, 18), tz);
+    benchmark::DoNotOptimize(
+        absl::FromCivil(absl::CivilSecond(2014, 12, 18, 20, 16, 18), tz));
   }
 }
 BENCHMARK(BM_Time_FromCivilUTC_Absl);
@@ -235,9 +238,11 @@
   int i = 0;
   while (state.KeepRunning()) {
     if ((i & 1) == 0) {
-      absl::FromCivil(absl::CivilSecond(2014, 12, 0, 20, 16, 18), tz);
+      benchmark::DoNotOptimize(
+          absl::FromCivil(absl::CivilSecond(2014, 12, 0, 20, 16, 18), tz));
     } else {
-      absl::FromCivil(absl::CivilSecond(2013, 11, 0, 18, 30, 27), tz);
+      benchmark::DoNotOptimize(
+          absl::FromCivil(absl::CivilSecond(2013, 11, 0, 18, 30, 27), tz));
     }
     ++i;
   }
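
The benchmark changes wrap each discarded absl::FromCivil() result in benchmark::DoNotOptimize(). A call whose result is never used is exactly what an optimizer may delete, especially now that these functions are annotated as const/pure, and a deleted call would leave the loop timing nothing. A minimal sketch of the pattern in the same style as this file (BM_FromCivilUTC_Sketch is a hypothetical benchmark, assuming Google Benchmark and the absl time headers are available):

    #include "benchmark/benchmark.h"
    #include "absl/time/civil_time.h"
    #include "absl/time/time.h"

    void BM_FromCivilUTC_Sketch(benchmark::State& state) {
      const absl::TimeZone tz = absl::UTCTimeZone();
      while (state.KeepRunning()) {
        // Marks the result as observed so the conversion cannot be elided,
        // without adding measurable overhead of its own.
        benchmark::DoNotOptimize(
            absl::FromCivil(absl::CivilSecond(2014, 12, 18, 20, 16, 18), tz));
      }
    }
    BENCHMARK(BM_FromCivilUTC_Sketch);
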
diff --git a/abseil-cpp/absl/time/time_test.cc b/abseil-cpp/absl/time/time_test.cc
index b28a99f..bcf4f2a 100644
--- a/abseil-cpp/absl/time/time_test.cc
+++ b/abseil-cpp/absl/time/time_test.cc
@@ -28,6 +28,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/numeric/int128.h"
+#include "absl/strings/str_format.h"
 #include "absl/time/clock.h"
 #include "absl/time/internal/test_util.h"
 
@@ -1070,7 +1071,8 @@
   EXPECT_EQ("292277026596-12-04T15:30:07+00:00",
             absl::FormatTime(absl::RFC3339_full, t, utc));
   EXPECT_EQ(
-      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::max()), t);
+      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::max()),
+      t);
 
   // Checks that we can also get the maximal Time value for a far-east zone.
   const absl::TimeZone plus14 = absl::FixedTimeZone(14 * 60 * 60);
@@ -1078,7 +1080,8 @@
   EXPECT_EQ("292277026596-12-05T05:30:07+14:00",
             absl::FormatTime(absl::RFC3339_full, t, plus14));
   EXPECT_EQ(
-      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::max()), t);
+      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::max()),
+      t);
 
   // One second later should push us to infinity.
   t = absl::FromCivil(absl::CivilSecond(292277026596, 12, 4, 15, 30, 8), utc);
@@ -1092,7 +1095,8 @@
   EXPECT_EQ("-292277022657-01-27T08:29:52+00:00",
             absl::FormatTime(absl::RFC3339_full, t, utc));
   EXPECT_EQ(
-      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::min()), t);
+      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::min()),
+      t);
 
   // Checks that we can also get the minimal Time value for a far-west zone.
   const absl::TimeZone minus12 = absl::FixedTimeZone(-12 * 60 * 60);
@@ -1101,7 +1105,8 @@
   EXPECT_EQ("-292277022657-01-26T20:29:52-12:00",
             absl::FormatTime(absl::RFC3339_full, t, minus12));
   EXPECT_EQ(
-      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::min()), t);
+      absl::UnixEpoch() + absl::Seconds(std::numeric_limits<int64_t>::min()),
+      t);
 
   // One second before should push us to -infinity.
   t = absl::FromCivil(absl::CivilSecond(-292277022657, 1, 27, 8, 29, 51), utc);
@@ -1273,4 +1278,11 @@
   // We have a transition but we don't know which one.
 }
 
+TEST(Time, AbslStringify) {
+  // FormatTime is already well tested, so just use one test case here to
+  // verify that StrFormat("%v", t) works as expected.
+  absl::Time t = absl::Now();
+  EXPECT_EQ(absl::StrFormat("%v", t), absl::FormatTime(t));
+}
+
 }  // namespace
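
The new test above exercises the AbslStringify() extension point added to time.h, which is what lets absl::StrFormat("%v", ...) (and, per the header comment, absl::StrCat() and friends) print absl::Time and absl::Duration without an explicit FormatTime()/FormatDuration() call. A short illustrative sketch (DescribeDeadline is a made-up helper):

    #include <string>
    #include "absl/strings/str_format.h"
    #include "absl/time/clock.h"
    #include "absl/time/time.h"

    std::string DescribeDeadline(absl::Time deadline) {
      const absl::Duration remaining = deadline - absl::Now();
      // %v dispatches to the AbslStringify() overloads added in this change,
      // which delegate to FormatTime() and FormatDuration().
      return absl::StrFormat("deadline %v (in %v)", deadline, remaining);
    }
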
diff --git a/abseil-cpp/absl/types/BUILD.bazel b/abseil-cpp/absl/types/BUILD.bazel
index 83be936..b57d3b9 100644
--- a/abseil-cpp/absl/types/BUILD.bazel
+++ b/abseil-cpp/absl/types/BUILD.bazel
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -78,8 +77,8 @@
         ":any",
         "//absl/base:config",
         "//absl/base:exception_testing",
-        "//absl/base:raw_logging_internal",
         "//absl/container:test_instance_tracker",
+        "//absl/log",
         "@com_google_googletest//:gtest_main",
     ],
 )
@@ -186,7 +185,7 @@
     deps = [
         ":optional",
         "//absl/base:config",
-        "//absl/base:raw_logging_internal",
+        "//absl/log",
         "//absl/meta:type_traits",
         "//absl/strings",
         "@com_google_googletest//:gtest_main",
@@ -315,6 +314,7 @@
     name = "compare",
     hdrs = ["compare.h"],
     copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         "//absl/base:core_headers",
         "//absl/meta:type_traits",
diff --git a/abseil-cpp/absl/types/CMakeLists.txt b/abseil-cpp/absl/types/CMakeLists.txt
index 3f99ad8..c0dcee7 100644
--- a/abseil-cpp/absl/types/CMakeLists.txt
+++ b/abseil-cpp/absl/types/CMakeLists.txt
@@ -43,6 +43,7 @@
   PUBLIC
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     bad_any_cast_impl
@@ -67,9 +68,9 @@
     absl::any
     absl::config
     absl::exception_testing
-    absl::raw_logging_internal
+    absl::log
     absl::test_instance_tracker
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -85,7 +86,7 @@
     absl::exception_testing
     absl::raw_logging_internal
     absl::test_instance_tracker
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -99,7 +100,7 @@
     absl::any
     absl::config
     absl::exception_safety_testing
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -136,7 +137,7 @@
     absl::inlined_vector
     absl::hash_testing
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -156,7 +157,7 @@
     absl::inlined_vector
     absl::hash_testing
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -219,10 +220,10 @@
   DEPS
     absl::optional
     absl::config
-    absl::raw_logging_internal
+    absl::log
     absl::strings
     absl::type_traits
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -236,9 +237,10 @@
     absl::optional
     absl::config
     absl::exception_safety_testing
-    gmock_main
+    GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     conformance_testing
@@ -258,7 +260,7 @@
     absl::type_traits
     absl::strings
     absl::utility
-    gmock_main
+    GTest::gmock_main
   TESTONLY
 )
 
@@ -275,7 +277,7 @@
   DEPS
     absl::conformance_testing
     absl::type_traits
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_test(
@@ -288,7 +290,7 @@
   DEPS
     absl::conformance_testing
     absl::type_traits
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -324,7 +326,7 @@
     absl::memory
     absl::type_traits
     absl::strings
-    gmock_main
+    GTest::gmock_main
 )
 
 absl_cc_library(
@@ -350,12 +352,9 @@
   DEPS
     absl::base
     absl::compare
-    gmock_main
+    GTest::gmock_main
 )
 
-# TODO(cohenjon,zhangxy) Figure out why this test is failing on gcc 4.8
-if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
-else()
 absl_cc_test(
   NAME
     variant_exception_safety_test
@@ -368,6 +367,5 @@
     absl::config
     absl::exception_safety_testing
     absl::memory
-    gmock_main
+    GTest::gmock_main
 )
-endif()
diff --git a/abseil-cpp/absl/types/any.h b/abseil-cpp/absl/types/any.h
index fc5a074..61f071f 100644
--- a/abseil-cpp/absl/types/any.h
+++ b/abseil-cpp/absl/types/any.h
@@ -53,6 +53,7 @@
 #ifndef ABSL_TYPES_ANY_H_
 #define ABSL_TYPES_ANY_H_
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/utility/utility.h"
 
@@ -81,18 +82,9 @@
 #include <utility>
 
 #include "absl/base/internal/fast_type_id.h"
-#include "absl/base/macros.h"
 #include "absl/meta/type_traits.h"
 #include "absl/types/bad_any_cast.h"
 
-// NOTE: This macro is an implementation detail that is undefined at the bottom
-// of the file. It is not intended for expansion directly from user code.
-#ifdef ABSL_ANY_DETAIL_HAS_RTTI
-#error ABSL_ANY_DETAIL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
-#define ABSL_ANY_DETAIL_HAS_RTTI 1
-#endif  // !defined(__GNUC__) || defined(__GXX_RTTI)
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
@@ -297,7 +289,7 @@
       typename T, typename... Args, typename VT = absl::decay_t<T>,
       absl::enable_if_t<std::is_copy_constructible<VT>::value &&
                         std::is_constructible<VT, Args...>::value>* = nullptr>
-  VT& emplace(Args&&... args) {
+  VT& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     reset();  // NOTE: reset() is required here even in the world of exceptions.
     Obj<VT>* const object_ptr =
         new Obj<VT>(in_place, std::forward<Args>(args)...);
@@ -321,7 +313,8 @@
       absl::enable_if_t<std::is_copy_constructible<VT>::value &&
                         std::is_constructible<VT, std::initializer_list<U>&,
                                               Args...>::value>* = nullptr>
-  VT& emplace(std::initializer_list<U> ilist, Args&&... args) {
+  VT& emplace(std::initializer_list<U> ilist,
+              Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     reset();  // NOTE: reset() is required here even in the world of exceptions.
     Obj<VT>* const object_ptr =
         new Obj<VT>(in_place, ilist, std::forward<Args>(args)...);
@@ -348,7 +341,7 @@
   // returns `false`.
   bool has_value() const noexcept { return obj_ != nullptr; }
 
-#if ABSL_ANY_DETAIL_HAS_RTTI
+#ifdef ABSL_INTERNAL_HAS_RTTI
   // Returns: typeid(T) if *this has a contained object of type T, otherwise
   // typeid(void).
   const std::type_info& type() const noexcept {
@@ -358,7 +351,7 @@
 
     return typeid(void);
   }
-#endif  // ABSL_ANY_DETAIL_HAS_RTTI
+#endif  // ABSL_INTERNAL_HAS_RTTI
 
  private:
   // Tagged type-erased abstraction for holding a cloneable object.
@@ -367,9 +360,9 @@
     virtual ~ObjInterface() = default;
     virtual std::unique_ptr<ObjInterface> Clone() const = 0;
     virtual const void* ObjTypeId() const noexcept = 0;
-#if ABSL_ANY_DETAIL_HAS_RTTI
+#ifdef ABSL_INTERNAL_HAS_RTTI
     virtual const std::type_info& Type() const noexcept = 0;
-#endif  // ABSL_ANY_DETAIL_HAS_RTTI
+#endif  // ABSL_INTERNAL_HAS_RTTI
   };
 
   // Hold a value of some queryable type, with an ability to Clone it.
@@ -386,9 +379,9 @@
 
     const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
 
-#if ABSL_ANY_DETAIL_HAS_RTTI
+#ifdef ABSL_INTERNAL_HAS_RTTI
     const std::type_info& Type() const noexcept final { return typeid(T); }
-#endif  // ABSL_ANY_DETAIL_HAS_RTTI
+#endif  // ABSL_INTERNAL_HAS_RTTI
 
     T value;
   };
@@ -521,8 +514,6 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#undef ABSL_ANY_DETAIL_HAS_RTTI
-
 #endif  // ABSL_USES_STD_ANY
 
 #endif  // ABSL_TYPES_ANY_H_
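
A short, illustrative sketch of the two any.h changes above, not taken from the patch: emplace() still returns a reference into the absl::any, now annotated ABSL_ATTRIBUTE_LIFETIME_BOUND so compilers that understand the attribute can flag references that outlive the object, and type() is compiled only when ABSL_INTERNAL_HAS_RTTI (an internal Abseil macro from absl/base/config.h) is defined.

#include <string>
#include <typeinfo>
#include "absl/types/any.h"

int main() {
  absl::any box;
  // The returned reference is tied to box's lifetime.
  std::string& s = box.emplace<std::string>("hello");
  s += ", world";
#ifdef ABSL_INTERNAL_HAS_RTTI
  // type() exists only in RTTI builds, mirroring the guard in any.h.
  const bool is_string = (box.type() == typeid(std::string));
  (void)is_string;
#endif
  return absl::any_cast<std::string&>(box).size() == 12 ? 0 : 1;
}
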
diff --git a/abseil-cpp/absl/types/any_test.cc b/abseil-cpp/absl/types/any_test.cc
index 70e4ba2..666ea5b 100644
--- a/abseil-cpp/absl/types/any_test.cc
+++ b/abseil-cpp/absl/types/any_test.cc
@@ -25,8 +25,8 @@
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/exception_testing.h"
-#include "absl/base/internal/raw_logging.h"
 #include "absl/container/internal/test_instance_tracker.h"
+#include "absl/log/log.h"
 
 namespace {
 using absl::test_internal::CopyableOnlyInstance;
@@ -704,7 +704,7 @@
 #ifdef ABSL_HAVE_EXCEPTIONS
     throw BadCopy();
 #else
-    ABSL_RAW_LOG(FATAL, "Bad copy");
+    LOG(FATAL) << "Bad copy";
 #endif
   }
 };
@@ -754,26 +754,23 @@
 
 // Test the guarantees regarding exceptions in emplace.
 TEST(AnyTest, FailedEmplace) {
-  {
-    BadCopyable bad;
-    absl::any target;
-    ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
-  }
+  BadCopyable bad;
+  absl::any target;
+  ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
+}
 
-  {
-    BadCopyable bad;
-    absl::any target(absl::in_place_type<int>);
-    ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
-#if defined(ABSL_USES_STD_ANY) && defined(__GLIBCXX__)
-    // libstdc++ std::any::emplace() implementation (as of 7.2) has a bug: if an
-    // exception is thrown, *this contains a value.
-#define ABSL_GLIBCXX_ANY_EMPLACE_EXCEPTION_BUG 1
+// GCC and Clang have a bug here.
+// In some cases, the exception seems to be thrown at the wrong time, and
+// target may contain a value.
+#ifdef __GNUC__
+TEST(AnyTest, DISABLED_FailedEmplaceInPlace) {
+#else
+TEST(AnyTest, FailedEmplaceInPlace) {
 #endif
-#if defined(ABSL_HAVE_EXCEPTIONS) && \
-    !defined(ABSL_GLIBCXX_ANY_EMPLACE_EXCEPTION_BUG)
-    EXPECT_FALSE(target.has_value());
-#endif
-  }
+  BadCopyable bad;
+  absl::any target(absl::in_place_type<int>);
+  ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
+  EXPECT_FALSE(target.has_value());
 }
 
 }  // namespace
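
The raw-logging include above is replaced with absl/log/log.h; a minimal sketch of the resulting error-reporting pattern follows (the function name is illustrative). LOG(FATAL) terminates the process itself, which is why the abort() that used to follow ABSL_RAW_LOG(FATAL, ...) in optional_test.cc further down can also be dropped.

#include <stdexcept>

#include "absl/base/config.h"
#include "absl/log/log.h"

void ReportBadCopy() {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::runtime_error("Bad copy");
#else
  // LOG(FATAL) logs the message and aborts; no explicit abort() needed.
  LOG(FATAL) << "Bad copy";
#endif
}
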
diff --git a/abseil-cpp/absl/types/bad_optional_access.h b/abseil-cpp/absl/types/bad_optional_access.h
index a500286..049e72a 100644
--- a/abseil-cpp/absl/types/bad_optional_access.h
+++ b/abseil-cpp/absl/types/bad_optional_access.h
@@ -67,7 +67,7 @@
 namespace optional_internal {
 
 // throw delegator
-[[noreturn]] void throw_bad_optional_access();
+[[noreturn]] ABSL_DLL void throw_bad_optional_access();
 
 }  // namespace optional_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/types/bad_variant_access.h b/abseil-cpp/absl/types/bad_variant_access.h
index 095969f..8ab215e 100644
--- a/abseil-cpp/absl/types/bad_variant_access.h
+++ b/abseil-cpp/absl/types/bad_variant_access.h
@@ -70,8 +70,8 @@
 
 namespace variant_internal {
 
-[[noreturn]] void ThrowBadVariantAccess();
-[[noreturn]] void Rethrow();
+[[noreturn]] ABSL_DLL void ThrowBadVariantAccess();
+[[noreturn]] ABSL_DLL void Rethrow();
 
 }  // namespace variant_internal
 ABSL_NAMESPACE_END
diff --git a/abseil-cpp/absl/types/compare.h b/abseil-cpp/absl/types/compare.h
index 19b076e..2b89b69 100644
--- a/abseil-cpp/absl/types/compare.h
+++ b/abseil-cpp/absl/types/compare.h
@@ -36,6 +36,7 @@
 #include <type_traits>
 
 #include "absl/base/attributes.h"
+#include "absl/base/macros.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -44,29 +45,37 @@
 
 using value_type = int8_t;
 
-template <typename T>
-struct Fail {
-  static_assert(sizeof(T) < 0, "Only literal `0` is allowed.");
-};
-
-// We need the NullPtrT template to avoid triggering the modernize-use-nullptr
-// ClangTidy warning in user code.
-template <typename NullPtrT = std::nullptr_t>
-struct OnlyLiteralZero {
-  constexpr OnlyLiteralZero(NullPtrT) noexcept {}  // NOLINT
+class OnlyLiteralZero {
+ public:
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+  // On clang, we can avoid triggering modernize-use-nullptr by only enabling
+  // this overload when the value is a compile time integer constant equal to 0.
+  //
+  // In C++20, this could be a static_assert in a consteval function.
+  constexpr OnlyLiteralZero(int n)  // NOLINT
+      __attribute__((enable_if(n == 0, "Only literal `0` is allowed."))) {}
+#else  // ABSL_HAVE_ATTRIBUTE(enable_if)
+  // Accept only literal zero since it can be implicitly converted to a pointer
+  // to member type. nullptr constants will be caught by the other constructor
+  // which accepts a nullptr_t.
+  //
+  // This constructor is not used for clang since it triggers
+  // modernize-use-nullptr.
+  constexpr OnlyLiteralZero(int OnlyLiteralZero::*) noexcept {}  // NOLINT
+#endif
 
   // Fails compilation when `nullptr` or integral type arguments other than
   // `int` are passed. This constructor doesn't accept `int` because literal `0`
   // has type `int`. Literal `0` arguments will be implicitly converted to
   // `std::nullptr_t` and accepted by the above constructor, while other `int`
   // arguments will fail to be converted and cause compilation failure.
-  template <
-      typename T,
-      typename = typename std::enable_if<
-          std::is_same<T, std::nullptr_t>::value ||
-          (std::is_integral<T>::value && !std::is_same<T, int>::value)>::type,
-      typename = typename Fail<T>::type>
-  OnlyLiteralZero(T);  // NOLINT
+  template <typename T, typename = typename std::enable_if<
+                            std::is_same<T, std::nullptr_t>::value ||
+                            (std::is_integral<T>::value &&
+                             !std::is_same<T, int>::value)>::type>
+  OnlyLiteralZero(T) {  // NOLINT
+    static_assert(sizeof(T) < 0, "Only literal `0` is allowed.");
+  }
 };
 
 enum class eq : value_type {
@@ -163,18 +172,18 @@
 
   // Comparisons
   friend constexpr bool operator==(
-      weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_equality v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ == 0;
   }
   friend constexpr bool operator!=(
-      weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_equality v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ != 0;
   }
-  friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
                                    weak_equality v) noexcept {
     return 0 == v.value_;
   }
-  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
                                    weak_equality v) noexcept {
     return 0 != v.value_;
   }
@@ -214,18 +223,18 @@
   }
   // Comparisons
   friend constexpr bool operator==(
-      strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_equality v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ == 0;
   }
   friend constexpr bool operator!=(
-      strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_equality v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ != 0;
   }
-  friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
                                    strong_equality v) noexcept {
     return 0 == v.value_;
   }
-  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
                                    strong_equality v) noexcept {
     return 0 != v.value_;
   }
@@ -277,50 +286,50 @@
   }
   // Comparisons
   friend constexpr bool operator==(
-      partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.is_ordered() && v.value_ == 0;
   }
   friend constexpr bool operator!=(
-      partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return !v.is_ordered() || v.value_ != 0;
   }
   friend constexpr bool operator<(
-      partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.is_ordered() && v.value_ < 0;
   }
   friend constexpr bool operator<=(
-      partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.is_ordered() && v.value_ <= 0;
   }
   friend constexpr bool operator>(
-      partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.is_ordered() && v.value_ > 0;
   }
   friend constexpr bool operator>=(
-      partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.is_ordered() && v.value_ >= 0;
   }
-  friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
                                    partial_ordering v) noexcept {
     return v.is_ordered() && 0 == v.value_;
   }
-  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
                                    partial_ordering v) noexcept {
     return !v.is_ordered() || 0 != v.value_;
   }
-  friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
                                   partial_ordering v) noexcept {
     return v.is_ordered() && 0 < v.value_;
   }
-  friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
                                    partial_ordering v) noexcept {
     return v.is_ordered() && 0 <= v.value_;
   }
-  friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
                                   partial_ordering v) noexcept {
     return v.is_ordered() && 0 > v.value_;
   }
-  friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
                                    partial_ordering v) noexcept {
     return v.is_ordered() && 0 >= v.value_;
   }
@@ -369,50 +378,50 @@
   }
   // Comparisons
   friend constexpr bool operator==(
-      weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ == 0;
   }
   friend constexpr bool operator!=(
-      weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ != 0;
   }
   friend constexpr bool operator<(
-      weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ < 0;
   }
   friend constexpr bool operator<=(
-      weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ <= 0;
   }
   friend constexpr bool operator>(
-      weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ > 0;
   }
   friend constexpr bool operator>=(
-      weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ >= 0;
   }
-  friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
                                    weak_ordering v) noexcept {
     return 0 == v.value_;
   }
-  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
                                    weak_ordering v) noexcept {
     return 0 != v.value_;
   }
-  friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
                                   weak_ordering v) noexcept {
     return 0 < v.value_;
   }
-  friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
                                    weak_ordering v) noexcept {
     return 0 <= v.value_;
   }
-  friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
                                   weak_ordering v) noexcept {
     return 0 > v.value_;
   }
-  friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
                                    weak_ordering v) noexcept {
     return 0 >= v.value_;
   }
@@ -468,50 +477,50 @@
   }
   // Comparisons
   friend constexpr bool operator==(
-      strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ == 0;
   }
   friend constexpr bool operator!=(
-      strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ != 0;
   }
   friend constexpr bool operator<(
-      strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ < 0;
   }
   friend constexpr bool operator<=(
-      strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ <= 0;
   }
   friend constexpr bool operator>(
-      strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ > 0;
   }
   friend constexpr bool operator>=(
-      strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+      strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
     return v.value_ >= 0;
   }
-  friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
                                    strong_ordering v) noexcept {
     return 0 == v.value_;
   }
-  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
                                    strong_ordering v) noexcept {
     return 0 != v.value_;
   }
-  friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
                                   strong_ordering v) noexcept {
     return 0 < v.value_;
   }
-  friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
                                    strong_ordering v) noexcept {
     return 0 <= v.value_;
   }
-  friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
                                   strong_ordering v) noexcept {
     return 0 > v.value_;
   }
-  friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>,
+  friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
                                    strong_ordering v) noexcept {
     return 0 >= v.value_;
   }
@@ -544,9 +553,9 @@
 // Helper functions to do a boolean comparison of two keys given a boolean
 // or three-way comparator.
 // SFINAE prevents implicit conversions to bool (such as from int).
-template <typename Bool,
-          absl::enable_if_t<std::is_same<bool, Bool>::value, int> = 0>
-constexpr bool compare_result_as_less_than(const Bool r) { return r; }
+template <typename BoolT,
+          absl::enable_if_t<std::is_same<bool, BoolT>::value, int> = 0>
+constexpr bool compare_result_as_less_than(const BoolT r) { return r; }
 constexpr bool compare_result_as_less_than(const absl::weak_ordering r) {
   return r < 0;
 }
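
A minimal usage sketch (illustrative, not from the patch) of what OnlyLiteralZero enforces after this rewrite: ordering results are compared against the literal 0 only, mirroring std::weak_ordering, while comparisons against an int variable or nullptr are rejected at compile time.

#include "absl/types/compare.h"

bool IsLess(absl::weak_ordering o) { return o < 0; }

int main() {
  // int zero = 0;
  // Comparing `o` against `zero` or `nullptr` would not compile:
  // only literal `0` is allowed.
  return IsLess(absl::weak_ordering::less) ? 0 : 1;
}
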
diff --git a/abseil-cpp/absl/types/internal/conformance_profile.h b/abseil-cpp/absl/types/internal/conformance_profile.h
index cf64ff4..37b017d 100644
--- a/abseil-cpp/absl/types/internal/conformance_profile.h
+++ b/abseil-cpp/absl/types/internal/conformance_profile.h
@@ -719,6 +719,7 @@
                                                          type##_support); \
   ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(bool, is_##type)
 
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(default_constructible);
 ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_constructible);
 ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_constructible);
@@ -733,6 +734,7 @@
 ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_than_comparable);
 ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(swappable);
 ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(hashable);
+#endif
 
 #undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF
 #undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL
diff --git a/abseil-cpp/absl/types/internal/optional.h b/abseil-cpp/absl/types/internal/optional.h
index 92932b6..a96d260 100644
--- a/abseil-cpp/absl/types/internal/optional.h
+++ b/abseil-cpp/absl/types/internal/optional.h
@@ -25,34 +25,6 @@
 #include "absl/meta/type_traits.h"
 #include "absl/utility/utility.h"
 
-// ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
-//
-// Inheriting constructors is supported in GCC 4.8+, Clang 3.3+ and MSVC 2015.
-// __cpp_inheriting_constructors is a predefined macro and a recommended way to
-// check for this language feature, but GCC doesn't support it until 5.0 and
-// Clang doesn't support it until 3.6.
-// Also, MSVC 2015 has a bug: it doesn't inherit the constexpr template
-// constructor. For example, the following code won't work on MSVC 2015 Update3:
-// struct Base {
-//   int t;
-//   template <typename T>
-//   constexpr Base(T t_) : t(t_) {}
-// };
-// struct Foo : Base {
-//   using Base::Base;
-// }
-// constexpr Foo foo(0);  // doesn't work on MSVC 2015
-#if defined(__clang__)
-#if __has_feature(cxx_inheriting_constructors)
-#define ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
-#endif
-#elif (defined(__GNUC__) &&                                       \
-       (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 8)) || \
-    (__cpp_inheriting_constructors >= 200802) ||                  \
-    (defined(_MSC_VER) && _MSC_VER >= 1910)
-#define ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
-#endif
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
@@ -91,7 +63,15 @@
 
   void destruct() noexcept {
     if (engaged_) {
+      // `data_` must be initialized if `engaged_` is true.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
       data_.~T();
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
       engaged_ = false;
     }
   }
@@ -137,15 +117,7 @@
 class optional_data_base : public optional_data_dtor_base<T> {
  protected:
   using base = optional_data_dtor_base<T>;
-#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
   using base::base;
-#else
-  optional_data_base() = default;
-
-  template <typename... Args>
-  constexpr explicit optional_data_base(in_place_t t, Args&&... args)
-      : base(t, absl::forward<Args>(args)...) {}
-#endif
 
   template <typename... Args>
   void construct(Args&&... args) {
@@ -180,27 +152,13 @@
 template <typename T>
 class optional_data<T, true> : public optional_data_base<T> {
  protected:
-#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
   using optional_data_base<T>::optional_data_base;
-#else
-  optional_data() = default;
-
-  template <typename... Args>
-  constexpr explicit optional_data(in_place_t t, Args&&... args)
-      : optional_data_base<T>(t, absl::forward<Args>(args)...) {}
-#endif
 };
 
 template <typename T>
 class optional_data<T, false> : public optional_data_base<T> {
  protected:
-#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
   using optional_data_base<T>::optional_data_base;
-#else
-  template <typename... Args>
-  constexpr explicit optional_data(in_place_t t, Args&&... args)
-      : optional_data_base<T>(t, absl::forward<Args>(args)...) {}
-#endif
 
   optional_data() = default;
 
@@ -391,6 +349,4 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#undef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
-
 #endif  // ABSL_TYPES_INTERNAL_OPTIONAL_H_
diff --git a/abseil-cpp/absl/types/internal/span.h b/abseil-cpp/absl/types/internal/span.h
index 112612f..ab89ba3 100644
--- a/abseil-cpp/absl/types/internal/span.h
+++ b/abseil-cpp/absl/types/internal/span.h
@@ -28,10 +28,10 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-namespace span_internal {
-// A constexpr min function
-constexpr size_t Min(size_t a, size_t b) noexcept { return a < b ? a : b; }
+template <typename T>
+class Span;
 
+namespace span_internal {
 // Wrappers for access to container data pointers.
 template <typename C>
 constexpr auto GetDataImpl(C& c, char) noexcept  // NOLINT(runtime/references)
@@ -88,7 +88,7 @@
 template <template <typename> class SpanT, typename T>
 bool EqualImpl(SpanT<T> a, SpanT<T> b) {
   static_assert(std::is_const<T>::value, "");
-  return absl::equal(a.begin(), a.end(), b.begin(), b.end());
+  return std::equal(a.begin(), a.end(), b.begin(), b.end());
 }
 
 template <template <typename> class SpanT, typename T>
@@ -99,28 +99,39 @@
   return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
 }
 
-// The `IsConvertible` classes here are needed because of the
-// `std::is_convertible` bug in libcxx when compiled with GCC. This build
-// configuration is used by Android NDK toolchain. Reference link:
-// https://bugs.llvm.org/show_bug.cgi?id=27538.
-template <typename From, typename To>
-struct IsConvertibleHelper {
- private:
-  static std::true_type testval(To);
-  static std::false_type testval(...);
-
- public:
-  using type = decltype(testval(std::declval<From>()));
-};
-
-template <typename From, typename To>
-struct IsConvertible : IsConvertibleHelper<From, To>::type {};
-
-// TODO(zhangxy): replace `IsConvertible` with `std::is_convertible` once the
-// older version of libcxx is not supported.
 template <typename From, typename To>
 using EnableIfConvertibleTo =
-    typename std::enable_if<IsConvertible<From, To>::value>::type;
+    typename std::enable_if<std::is_convertible<From, To>::value>::type;
+
+// IsView is true for types where the return type of .data() is the same for
+// mutable and const instances. This isn't foolproof, but it's only used to
+// enable a compiler warning.
+template <typename T, typename = void, typename = void>
+struct IsView {
+  static constexpr bool value = false;
+};
+
+template <typename T>
+struct IsView<
+    T, absl::void_t<decltype(span_internal::GetData(std::declval<const T&>()))>,
+    absl::void_t<decltype(span_internal::GetData(std::declval<T&>()))>> {
+ private:
+  using Container = std::remove_const_t<T>;
+  using ConstData =
+      decltype(span_internal::GetData(std::declval<const Container&>()));
+  using MutData = decltype(span_internal::GetData(std::declval<Container&>()));
+ public:
+  static constexpr bool value = std::is_same<ConstData, MutData>::value;
+};
+
+// These enablers result in 'int' so they can be used as typenames or defaults
+// in template parameter lists.
+template <typename T>
+using EnableIfIsView = std::enable_if_t<IsView<T>::value, int>;
+
+template <typename T>
+using EnableIfNotIsView = std::enable_if_t<!IsView<T>::value, int>;
+
 }  // namespace span_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
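
A standalone sketch of the idea behind the new IsView trait, under the simplifying assumption that the container exposes .data() directly (Abseil itself goes through span_internal::GetData): a type counts as a view when const and non-const instances report the same data() pointer type.

#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>

template <typename T, typename = void>
struct IsViewLike : std::false_type {};

template <typename T>
struct IsViewLike<T, std::void_t<decltype(std::declval<T&>().data()),
                                 decltype(std::declval<const T&>().data())>>
    : std::is_same<decltype(std::declval<T&>().data()),
                   decltype(std::declval<const T&>().data())> {};

// string_view returns `const char*` either way, so it is view-like;
// vector<int> returns `int*` vs `const int*`, so it is not.
static_assert(IsViewLike<std::string_view>::value, "");
static_assert(!IsViewLike<std::vector<int>>::value, "");
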
diff --git a/abseil-cpp/absl/types/internal/variant.h b/abseil-cpp/absl/types/internal/variant.h
index d404e80..fc8829e 100644
--- a/abseil-cpp/absl/types/internal/variant.h
+++ b/abseil-cpp/absl/types/internal/variant.h
@@ -16,8 +16,8 @@
 // separate file to avoid cluttering the top of the API header with
 // implementation details.
 
-#ifndef ABSL_TYPES_variant_internal_H_
-#define ABSL_TYPES_variant_internal_H_
+#ifndef ABSL_TYPES_INTERNAL_VARIANT_H_
+#define ABSL_TYPES_INTERNAL_VARIANT_H_
 
 #include <cassert>
 #include <cstddef>
@@ -45,7 +45,7 @@
 template <class... Types>
 class variant;
 
-ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, static_cast<size_t>(-1));
 
 template <class T>
 struct variant_size;
@@ -449,7 +449,7 @@
 
 template <std::size_t HeadSize, std::size_t... TailSize>
 struct FlattenIndices<HeadSize, TailSize...> {
-  template<class... SizeType>
+  template <class... SizeType>
   static constexpr std::size_t Run(std::size_t head, SizeType... tail) {
     return head + HeadSize * FlattenIndices<TailSize...>::Run(tail...);
   }
@@ -498,8 +498,8 @@
   };
 
   template <class Op, class... SizeType>
-  static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(
-      Op&& op, SizeType... i) {
+  static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(Op&& op,
+                                                              SizeType... i) {
     return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
         FlattenedOp<Op>{absl::forward<Op>(op)},
         FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
@@ -683,13 +683,13 @@
         variant_internal::IndexOfConstructedType<Left, QualifiedNew>;
 
     void operator()(SizeT<NewIndex::value> /*old_i*/
-                    ) const {
+    ) const {
       Access<NewIndex::value>(*left) = absl::forward<QualifiedNew>(other);
     }
 
     template <std::size_t OldIndex>
     void operator()(SizeT<OldIndex> /*old_i*/
-                    ) const {
+    ) const {
       using New =
           typename absl::variant_alternative<NewIndex::value, Left>::type;
       if (std::is_nothrow_constructible<New, QualifiedNew>::value ||
@@ -868,18 +868,6 @@
 template <class Self, std::size_t I>
 struct IsNeitherSelfNorInPlace<Self, in_place_index_t<I>> : std::false_type {};
 
-template <class Variant, class T, class = void>
-struct ConversionIsPossibleImpl : std::false_type {};
-
-template <class Variant, class T>
-struct ConversionIsPossibleImpl<
-    Variant, T,
-    void_t<decltype(ImaginaryFun<Variant>::Run(std::declval<T>(), {}))>>
-    : std::true_type {};
-
-template <class Variant, class T>
-struct ConversionIsPossible : ConversionIsPossibleImpl<Variant, T>::type {};
-
 template <class Variant, class T>
 struct IndexOfConstructedType<
     Variant, T,
@@ -889,8 +877,8 @@
 template <std::size_t... Is>
 struct ContainsVariantNPos
     : absl::negation<std::is_same<  // NOLINT
-          absl::integer_sequence<bool, 0 <= Is...>,
-          absl::integer_sequence<bool, Is != absl::variant_npos...>>> {};
+          std::integer_sequence<bool, 0 <= Is...>,
+          std::integer_sequence<bool, Is != absl::variant_npos...>>> {};
 
 template <class Op, class... QualifiedVariants>
 using RawVisitResult =
@@ -1151,16 +1139,16 @@
   // Type metafunction which returns the element type selected if
   // OverloadSet::Overload() is well-formed when called with argument type U.
   template <typename U>
-  using BestMatch = decltype(
-      variant_internal::OverloadSet<Ts...>::Overload(std::declval<U>()));
+  using BestMatch = decltype(variant_internal::OverloadSet<Ts...>::Overload(
+      std::declval<U>()));
 
   // Type metafunction which returns true if OverloadSet::Overload() is
   // well-formed when called with argument type U.
   // CanAccept can't be just an alias because there is a MSVC bug on parameter
   // pack expansion involving decltype.
   template <typename U>
-  struct CanAccept :
-      std::integral_constant<bool, !std::is_void<BestMatch<U>>::value> {};
+  struct CanAccept
+      : std::integral_constant<bool, !std::is_void<BestMatch<U>>::value> {};
 
   // Type metafunction which returns true if Other is an instantiation of
   // variant, and variants's converting constructor from Other will be
@@ -1183,8 +1171,8 @@
 // A union's defaulted copy/move constructor is deleted if any variant member's
 // copy/move constructor is nontrivial.
 template <typename T>
-struct IsTriviallyMoveConstructible:
-  std::is_move_constructible<Union<T, TrivialMoveOnly>> {};
+struct IsTriviallyMoveConstructible
+    : std::is_move_constructible<Union<T, TrivialMoveOnly>> {};
 
 // To guarantee triviality of all special-member functions that can be trivial,
 // we use a chain of conditional bases for each one.
@@ -1419,14 +1407,14 @@
   VariantMoveAssignBaseNontrivial& operator=(
       VariantMoveAssignBaseNontrivial const&) = default;
 
-    VariantMoveAssignBaseNontrivial&
-    operator=(VariantMoveAssignBaseNontrivial&& other) noexcept(
-        absl::conjunction<std::is_nothrow_move_constructible<T>...,
-                          std::is_nothrow_move_assignable<T>...>::value) {
-      VisitIndices<sizeof...(T)>::Run(
-          VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_);
-      return *this;
-    }
+  VariantMoveAssignBaseNontrivial&
+  operator=(VariantMoveAssignBaseNontrivial&& other) noexcept(
+      absl::conjunction<std::is_nothrow_move_constructible<T>...,
+                        std::is_nothrow_move_assignable<T>...>::value) {
+    VisitIndices<sizeof...(T)>::Run(
+        VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_);
+    return *this;
+  }
 
  protected:
   using Base::index_;
@@ -1450,12 +1438,12 @@
   VariantCopyAssignBaseNontrivial& operator=(
       VariantCopyAssignBaseNontrivial&&) = default;
 
-    VariantCopyAssignBaseNontrivial& operator=(
-        const VariantCopyAssignBaseNontrivial& other) {
-      VisitIndices<sizeof...(T)>::Run(
-          VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_);
-      return *this;
-    }
+  VariantCopyAssignBaseNontrivial& operator=(
+      const VariantCopyAssignBaseNontrivial& other) {
+    VisitIndices<sizeof...(T)>::Run(
+        VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_);
+    return *this;
+  }
 
  protected:
   using Base::index_;
@@ -1643,4 +1631,4 @@
 }  // namespace absl
 
 #endif  // !defined(ABSL_USES_STD_VARIANT)
-#endif  // ABSL_TYPES_variant_internal_H_
+#endif  // ABSL_TYPES_INTERNAL_VARIANT_H_
diff --git a/abseil-cpp/absl/types/optional.h b/abseil-cpp/absl/types/optional.h
index 61540cf..0a8080d 100644
--- a/abseil-cpp/absl/types/optional.h
+++ b/abseil-cpp/absl/types/optional.h
@@ -130,7 +130,7 @@
 
   // Constructs an `optional` holding an empty value, NOT a default constructed
   // `T`.
-  constexpr optional() noexcept {}
+  constexpr optional() noexcept = default;
 
   // Constructs an `optional` initialized with `nullopt` to hold an empty value.
   constexpr optional(nullopt_t) noexcept {}  // NOLINT(runtime/explicit)
@@ -282,15 +282,16 @@
   optional& operator=(optional&& src) = default;
 
   // Value assignment operators
-  template <
-      typename U = T,
-      typename = typename std::enable_if<absl::conjunction<
-          absl::negation<
-              std::is_same<optional<T>, typename std::decay<U>::type>>,
-          absl::negation<
-              absl::conjunction<std::is_scalar<T>,
-                                std::is_same<T, typename std::decay<U>::type>>>,
-          std::is_constructible<T, U>, std::is_assignable<T&, U>>::value>::type>
+  template <typename U = T,
+            int&...,  // Workaround an internal compiler error in GCC 5 to 10.
+            typename = typename std::enable_if<absl::conjunction<
+                absl::negation<
+                    std::is_same<optional<T>, typename std::decay<U>::type> >,
+                absl::negation<absl::conjunction<
+                    std::is_scalar<T>,
+                    std::is_same<T, typename std::decay<U>::type> > >,
+                std::is_constructible<T, U>,
+                std::is_assignable<T&, U> >::value>::type>
   optional& operator=(U&& v) {
     this->assign(std::forward<U>(v));
     return *this;
@@ -298,13 +299,14 @@
 
   template <
       typename U,
+      int&...,  // Workaround an internal compiler error in GCC 5 to 10.
       typename = typename std::enable_if<absl::conjunction<
-          absl::negation<std::is_same<T, U>>,
+          absl::negation<std::is_same<T, U> >,
           std::is_constructible<T, const U&>, std::is_assignable<T&, const U&>,
           absl::negation<
               optional_internal::
                   is_constructible_convertible_assignable_from_optional<
-                      T, U>>>::value>::type>
+                      T, U> > >::value>::type>
   optional& operator=(const optional<U>& rhs) {
     if (rhs) {
       this->assign(*rhs);
@@ -315,13 +317,14 @@
   }
 
   template <typename U,
+            int&...,  // Workaround an internal compiler error in GCC 5 to 10.
             typename = typename std::enable_if<absl::conjunction<
-                absl::negation<std::is_same<T, U>>, std::is_constructible<T, U>,
-                std::is_assignable<T&, U>,
+                absl::negation<std::is_same<T, U> >,
+                std::is_constructible<T, U>, std::is_assignable<T&, U>,
                 absl::negation<
                     optional_internal::
                         is_constructible_convertible_assignable_from_optional<
-                            T, U>>>::value>::type>
+                            T, U> > >::value>::type>
   optional& operator=(optional<U>&& rhs) {
     if (rhs) {
       this->assign(std::move(*rhs));
@@ -354,7 +357,7 @@
   template <typename... Args,
             typename = typename std::enable_if<
                 std::is_constructible<T, Args&&...>::value>::type>
-  T& emplace(Args&&... args) {
+  T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     this->destruct();
     this->construct(std::forward<Args>(args)...);
     return reference();
@@ -374,7 +377,8 @@
   template <typename U, typename... Args,
             typename = typename std::enable_if<std::is_constructible<
                 T, std::initializer_list<U>&, Args&&...>::value>::type>
-  T& emplace(std::initializer_list<U> il, Args&&... args) {
+  T& emplace(std::initializer_list<U> il,
+             Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     this->destruct();
     this->construct(il, std::forward<Args>(args)...);
     return reference();
@@ -411,11 +415,11 @@
   // `optional` is empty, behavior is undefined.
   //
   // If you need myOpt->foo in constexpr, use (*myOpt).foo instead.
-  const T* operator->() const {
+  const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(this->engaged_);
     return std::addressof(this->data_);
   }
-  T* operator->() {
+  T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(this->engaged_);
     return std::addressof(this->data_);
   }
@@ -424,17 +428,17 @@
   //
   // Accesses the underlying `T` value of an `optional`. If the `optional` is
   // empty, behavior is undefined.
-  constexpr const T& operator*() const& {
+  constexpr const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return ABSL_HARDENING_ASSERT(this->engaged_), reference();
   }
-  T& operator*() & {
+  T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(this->engaged_);
     return reference();
   }
-  constexpr const T&& operator*() const && {
+  constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference());
   }
-  T&& operator*() && {
+  T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(this->engaged_);
     return std::move(reference());
   }
@@ -469,23 +473,24 @@
   // and lvalue/rvalue-ness of the `optional` is preserved to the view of
   // the `T` sub-object. Throws `absl::bad_optional_access` when the `optional`
   // is empty.
-  constexpr const T& value() const & {
+  constexpr const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return static_cast<bool>(*this)
                ? reference()
                : (optional_internal::throw_bad_optional_access(), reference());
   }
-  T& value() & {
+  T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return static_cast<bool>(*this)
                ? reference()
                : (optional_internal::throw_bad_optional_access(), reference());
   }
-  T&& value() && {  // NOLINT(build/c++11)
+  T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND {  // NOLINT(build/c++11)
     return std::move(
         static_cast<bool>(*this)
             ? reference()
             : (optional_internal::throw_bad_optional_access(), reference()));
   }
-  constexpr const T&& value() const && {  // NOLINT(build/c++11)
+  constexpr const T&& value()
+      const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {  // NOLINT(build/c++11)
     return absl::move(
         static_cast<bool>(*this)
             ? reference()
diff --git a/abseil-cpp/absl/types/optional_test.cc b/abseil-cpp/absl/types/optional_test.cc
index 7ef142c..5da297b 100644
--- a/abseil-cpp/absl/types/optional_test.cc
+++ b/abseil-cpp/absl/types/optional_test.cc
@@ -23,10 +23,41 @@
 
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
 #include "absl/meta/type_traits.h"
 #include "absl/strings/string_view.h"
 
+#if defined(__cplusplus) && __cplusplus >= 202002L
+// In C++20, volatile-qualified return types are deprecated.
+#define ABSL_VOLATILE_RETURN_TYPES_DEPRECATED 1
+#endif
+
+// The following types help test an internal compiler error in GCC5 through
+// GCC10. The case OptionalTest.InternalCompilerErrorInGcc5ToGcc10 crashes the
+// compiler without a workaround. This test case should remain at the beginning
+// of the file as the internal compiler error is sensitive to other constructs
+// in this file.
+template <class T, class...>
+using GccIceHelper1 = T;
+template <typename T>
+struct GccIceHelper2 {};
+template <typename T>
+class GccIce {
+  template <typename U,
+            typename SecondTemplateArgHasToExistForSomeReason = void,
+            typename DependentType = void,
+            typename = std::is_assignable<GccIceHelper1<T, DependentType>&, U>>
+  GccIce& operator=(GccIceHelper2<U> const&) {}
+};
+
+TEST(OptionalTest, InternalCompilerErrorInGcc5ToGcc10) {
+  GccIce<int> instantiate_ice_with_same_type_as_optional;
+  static_cast<void>(instantiate_ice_with_same_type_as_optional);
+  absl::optional<int> val1;
+  absl::optional<int> val2;
+  val1 = val2;
+}
+
 struct Hashable {};
 
 namespace std {
@@ -66,9 +97,9 @@
 // 4522: multiple assignment operators specified
 // We wrote multiple of them to test that the correct overloads are selected.
 #ifdef _MSC_VER
-#pragma warning( push )
-#pragma warning( disable : 4521)
-#pragma warning( disable : 4522)
+#pragma warning(push)
+#pragma warning(disable : 4521)
+#pragma warning(disable : 4522)
 #endif
 struct Listenable {
   static StructorListener* listener;
@@ -102,20 +133,11 @@
   ~Listenable() { ++listener->destruct; }
 };
 #ifdef _MSC_VER
-#pragma warning( pop )
+#pragma warning(pop)
 #endif
 
 StructorListener* Listenable::listener = nullptr;
 
-// ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST is defined to 1 when the standard
-// library implementation doesn't marked initializer_list's default constructor
-// constexpr. The C++11 standard doesn't specify constexpr on it, but C++14
-// added it. However, libstdc++ 4.7 marked it constexpr.
-#if defined(_LIBCPP_VERSION) && \
-    (_LIBCPP_STD_VER <= 11 || defined(_LIBCPP_HAS_NO_CXX14_CONSTEXPR))
-#define ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST 1
-#endif
-
 struct ConstexprType {
   enum CtorTypes {
     kCtorDefault,
@@ -125,10 +147,8 @@
   };
   constexpr ConstexprType() : x(kCtorDefault) {}
   constexpr explicit ConstexprType(int i) : x(kCtorInt) {}
-#ifndef ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST
   constexpr ConstexprType(std::initializer_list<int> il)
       : x(kCtorInitializerList) {}
-#endif
   constexpr ConstexprType(const char*)  // NOLINT(runtime/explicit)
       : x(kCtorConstChar) {}
   int x;
@@ -205,6 +225,7 @@
     EXPECT_TRUE(opt42_copy);
     EXPECT_EQ(42, *opt42_copy);
   }
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   {
     absl::optional<volatile int> empty, opt42 = 42;
     absl::optional<volatile int> empty_copy(empty);
@@ -213,6 +234,7 @@
     EXPECT_TRUE(opt42_copy);
     EXPECT_EQ(42, *opt42_copy);
   }
+#endif
   // test copyablility
   EXPECT_TRUE(std::is_copy_constructible<absl::optional<int>>::value);
   EXPECT_TRUE(std::is_copy_constructible<absl::optional<Copyable>>::value);
@@ -224,18 +246,11 @@
 
   EXPECT_FALSE(
       absl::is_trivially_copy_constructible<absl::optional<Copyable>>::value);
-#if defined(ABSL_USES_STD_OPTIONAL) && defined(__GLIBCXX__)
-  // libstdc++ std::optional implementation (as of 7.2) has a bug: when T is
-  // trivially copyable, optional<T> is not trivially copyable (due to one of
-  // its base class is unconditionally nontrivial).
-#define ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG 1
-#endif
-#ifndef ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG
   EXPECT_TRUE(
       absl::is_trivially_copy_constructible<absl::optional<int>>::value);
   EXPECT_TRUE(
       absl::is_trivially_copy_constructible<absl::optional<const int>>::value);
-#ifndef _MSC_VER
+#if !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   // See defect report "Trivial copy/move constructor for class with volatile
   // member" at
   // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#2094
@@ -244,8 +259,7 @@
   // Also a cv-qualified scalar type should be trivially copyable.
   EXPECT_TRUE(absl::is_trivially_copy_constructible<
               absl::optional<volatile int>>::value);
-#endif  // _MSC_VER
-#endif  // ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG
+#endif  // !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
 
   // constexpr copy constructor for trivially copyable types
   {
@@ -275,17 +289,10 @@
     EXPECT_TRUE(absl::is_trivially_copy_constructible<
                 absl::optional<const TrivialCopyable>>::value);
 #endif
-    // When testing with VS 2017 15.3, there seems to be a bug in MSVC
-    // std::optional when T is volatile-qualified. So skipping this test.
-    // Bug report:
-    // https://connect.microsoft.com/VisualStudio/feedback/details/3142534
-#if defined(ABSL_USES_STD_OPTIONAL) && defined(_MSC_VER) && _MSC_VER >= 1911
-#define ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG 1
-#endif
-#ifndef ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
     EXPECT_FALSE(std::is_copy_constructible<
                  absl::optional<volatile TrivialCopyable>>::value);
-#endif
+#endif  // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   }
 }
 
@@ -305,11 +312,9 @@
   EXPECT_FALSE(std::is_move_constructible<absl::optional<NonMovable>>::value);
   // test noexcept
   EXPECT_TRUE(std::is_nothrow_move_constructible<absl::optional<int>>::value);
-#ifndef ABSL_USES_STD_OPTIONAL
   EXPECT_EQ(
       absl::default_allocator_is_nothrow::value,
       std::is_nothrow_move_constructible<absl::optional<MoveableThrow>>::value);
-#endif
   EXPECT_TRUE(std::is_nothrow_move_constructible<
               absl::optional<MoveableNoThrow>>::value);
 }
@@ -336,11 +341,9 @@
   constexpr absl::optional<ConstexprType> opt1{absl::in_place_t(), 1};
   static_assert(opt1, "");
   static_assert((*opt1).x == ConstexprType::kCtorInt, "");
-#ifndef ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST
   constexpr absl::optional<ConstexprType> opt2{absl::in_place_t(), {1, 2}};
   static_assert(opt2, "");
   static_assert((*opt2).x == ConstexprType::kCtorInitializerList, "");
-#endif
 
   EXPECT_FALSE((std::is_constructible<absl::optional<ConvertsFromInPlaceT>,
                                       absl::in_place_t>::value));
@@ -638,8 +641,7 @@
   EXPECT_TRUE(absl::is_copy_assignable<NonTrivial>::value);
   EXPECT_FALSE(absl::is_trivially_copy_assignable<NonTrivial>::value);
 
-  // std::optional doesn't support volatile nontrivial types.
-#ifndef ABSL_USES_STD_OPTIONAL
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   {
     StructorListener listener;
     Listenable::listener = &listener;
@@ -658,7 +660,7 @@
     EXPECT_EQ(1, listener.destruct);
     EXPECT_EQ(1, listener.volatile_copy_assign);
   }
-#endif  // ABSL_USES_STD_OPTIONAL
+#endif  // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
 }
 
 TEST(optionalTest, MoveAssignment) {
@@ -681,8 +683,7 @@
     EXPECT_EQ(1, listener.destruct);
     EXPECT_EQ(1, listener.move_assign);
   }
-  // std::optional doesn't support volatile nontrivial types.
-#ifndef ABSL_USES_STD_OPTIONAL
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   {
     StructorListener listener;
     Listenable::listener = &listener;
@@ -702,7 +703,7 @@
     EXPECT_EQ(1, listener.destruct);
     EXPECT_EQ(1, listener.volatile_move_assign);
   }
-#endif  // ABSL_USES_STD_OPTIONAL
+#endif  // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   EXPECT_FALSE(absl::is_move_assignable<absl::optional<const int>>::value);
   EXPECT_TRUE(absl::is_move_assignable<absl::optional<Copyable>>::value);
   EXPECT_TRUE(absl::is_move_assignable<absl::optional<MoveableThrow>>::value);
@@ -974,8 +975,8 @@
   EXPECT_EQ("foo", *opt);
   const auto& opt_const = opt;
   EXPECT_EQ("foo", *opt_const);
-  EXPECT_EQ(opt->size(), 3);
-  EXPECT_EQ(opt_const->size(), 3);
+  EXPECT_EQ(opt->size(), 3u);
+  EXPECT_EQ(opt_const->size(), 3u);
 
   constexpr absl::optional<ConstexprType> opt1(1);
   static_assert((*opt1).x == ConstexprType::kCtorInt, "");
@@ -986,9 +987,8 @@
 // Skip that test to make the build green again when using the old compiler.
 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59296 is fixed in 4.9.1.
 #if defined(__GNUC__) && !defined(__clang__)
-#define GCC_VERSION (__GNUC__ * 10000 \
-                     + __GNUC_MINOR__ * 100 \
-                     + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION \
+  (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 #if GCC_VERSION < 40901
 #define ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG
 #endif
@@ -996,7 +996,7 @@
 
 // MSVC has a bug with "cv-qualifiers in class construction", fixed in 2017. See
 // https://docs.microsoft.com/en-us/cpp/cpp-conformance-improvements-2017#bug-fixes
-// The compiler some incorrectly ingores the cv-qualifier when generating a
+// The compiler sometimes incorrectly ignores the cv-qualifier when generating
 // class object via a constructor call. For example:
 //
 // class optional {
@@ -1038,6 +1038,7 @@
 #endif
   EXPECT_EQ("c&&", TypeQuals(OC(absl::in_place, "xvalue_c").value()));
 
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   // test on volatile type
   using OV = absl::optional<volatile int>;
   OV lvalue_v(absl::in_place, 42);
@@ -1045,6 +1046,7 @@
   EXPECT_EQ(42, OV(42).value());
   EXPECT_TRUE((std::is_same<volatile int&, decltype(lvalue_v.value())>::value));
   EXPECT_TRUE((std::is_same<volatile int&&, decltype(OV(42).value())>::value));
+#endif  // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
 
   // test exception throw on value()
   absl::optional<int> empty;
@@ -1087,6 +1089,7 @@
 #endif
   EXPECT_EQ("c&&", TypeQuals(*OC(absl::in_place, "xvalue_c")));
 
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
   // test on volatile type
   using OV = absl::optional<volatile int>;
   OV lvalue_v(absl::in_place, 42);
@@ -1094,6 +1097,7 @@
   EXPECT_EQ(42, *OV(42));
   EXPECT_TRUE((std::is_same<volatile int&, decltype(*lvalue_v)>::value));
   EXPECT_TRUE((std::is_same<volatile int&&, decltype(*OV(42))>::value));
+#endif  // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
 
   constexpr absl::optional<int> opt1(1);
   static_assert(*opt1 == 1, "");
@@ -1196,7 +1200,6 @@
   EXPECT_TRUE(x >= y);
 }
 
-
 template <typename T, typename U, typename V>
 void TestComparisons() {
   absl::optional<T> ae, a2{2}, a4{4};
@@ -1289,7 +1292,6 @@
   EXPECT_TRUE(e1 == e2);
 }
 
-
 TEST(optionalTest, SwapRegression) {
   StructorListener listener;
   Listenable::listener = &listener;
@@ -1505,7 +1507,7 @@
   for (int i = 0; i < 100; ++i) {
     hashcodes.insert(hash(i));
   }
-  EXPECT_GT(hashcodes.size(), 90);
+  EXPECT_GT(hashcodes.size(), 90u);
 
   static_assert(is_hash_enabled_for<absl::optional<int>>::value, "");
   static_assert(is_hash_enabled_for<absl::optional<Hashable>>::value, "");
@@ -1540,8 +1542,7 @@
 struct MoveMeNoThrow {
   MoveMeNoThrow() : x(0) {}
   [[noreturn]] MoveMeNoThrow(const MoveMeNoThrow& other) : x(other.x) {
-    ABSL_RAW_LOG(FATAL, "Should not be called.");
-    abort();
+    LOG(FATAL) << "Should not be called.";
   }
   MoveMeNoThrow(MoveMeNoThrow&& other) noexcept : x(other.x) {}
   int x;
@@ -1558,12 +1559,10 @@
   static_assert(
       std::is_nothrow_move_constructible<absl::optional<MoveMeNoThrow>>::value,
       "");
-#ifndef ABSL_USES_STD_OPTIONAL
   static_assert(absl::default_allocator_is_nothrow::value ==
                     std::is_nothrow_move_constructible<
                         absl::optional<MoveMeThrow>>::value,
                 "");
-#endif
   std::vector<absl::optional<MoveMeNoThrow>> v;
   for (int i = 0; i < 10; ++i) v.emplace_back();
 }
diff --git a/abseil-cpp/absl/types/span.h b/abseil-cpp/absl/types/span.h
index 95fe792..70ed8eb 100644
--- a/abseil-cpp/absl/types/span.h
+++ b/abseil-cpp/absl/types/span.h
@@ -40,7 +40,6 @@
 //    * `absl::Span` has compiler-provided move and copy constructors and
 //      assignment. This is due to them being specified as `constexpr`, but that
 //      implies const in C++11.
-//    * `absl::Span` has no `element_type` typedef
 //    * A read-only `absl::Span<const T>` can be implicitly constructed from an
 //      initializer list.
 //    * `absl::Span` has no `bytes()`, `size_bytes()`, `as_bytes()`, or
@@ -61,6 +60,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/internal/throw_delegate.h"
 #include "absl/base/macros.h"
 #include "absl/base/optimization.h"
@@ -161,15 +161,16 @@
 
   // Used to SFINAE-enable a function when the slice elements are const.
   template <typename U>
-  using EnableIfConstView =
+  using EnableIfValueIsConst =
       typename std::enable_if<std::is_const<T>::value, U>::type;
 
   // Used to SFINAE-enable a function when the slice elements are mutable.
   template <typename U>
-  using EnableIfMutableView =
+  using EnableIfValueIsMutable =
       typename std::enable_if<!std::is_const<T>::value, U>::type;
 
  public:
+  using element_type = T;
   using value_type = absl::remove_cv_t<T>;
   using pointer = T*;
   using const_pointer = const T*;
@@ -196,13 +197,34 @@
   // Explicit reference constructor for a mutable `Span<T>` type. Can be
   // replaced with MakeSpan() to infer the type parameter.
   template <typename V, typename = EnableIfConvertibleFrom<V>,
-            typename = EnableIfMutableView<V>>
-  explicit Span(V& v) noexcept  // NOLINT(runtime/references)
+            typename = EnableIfValueIsMutable<V>,
+            typename = span_internal::EnableIfNotIsView<V>>
+  explicit Span(
+      V& v
+          ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept  // NOLINT(runtime/references)
       : Span(span_internal::GetData(v), v.size()) {}
 
   // Implicit reference constructor for a read-only `Span<const T>` type
   template <typename V, typename = EnableIfConvertibleFrom<V>,
-            typename = EnableIfConstView<V>>
+            typename = EnableIfValueIsConst<V>,
+            typename = span_internal::EnableIfNotIsView<V>>
+  constexpr Span(
+      const V& v
+          ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept  // NOLINT(runtime/explicit)
+      : Span(span_internal::GetData(v), v.size()) {}
+
+  // Overloads of the above two functions that are only enabled for view types.
+  // This is so we can drop the ABSL_ATTRIBUTE_LIFETIME_BOUND annotation. These
+  // overloads must be made unique by using a different template parameter list
+  // (hence the = 0 for the IsView enabler).
+  template <typename V, typename = EnableIfConvertibleFrom<V>,
+            typename = EnableIfValueIsMutable<V>,
+            span_internal::EnableIfIsView<V> = 0>
+  explicit Span(V& v) noexcept  // NOLINT(runtime/references)
+      : Span(span_internal::GetData(v), v.size()) {}
+  template <typename V, typename = EnableIfConvertibleFrom<V>,
+            typename = EnableIfValueIsConst<V>,
+            span_internal::EnableIfIsView<V> = 0>
   constexpr Span(const V& v) noexcept  // NOLINT(runtime/explicit)
       : Span(span_internal::GetData(v), v.size()) {}
 
@@ -242,9 +264,9 @@
   //   Process(ints);
   //
   template <typename LazyT = T,
-            typename = EnableIfConstView<LazyT>>
-  Span(
-      std::initializer_list<value_type> v) noexcept  // NOLINT(runtime/explicit)
+            typename = EnableIfValueIsConst<LazyT>>
+  Span(std::initializer_list<value_type> v
+           ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept  // NOLINT(runtime/explicit)
       : Span(v.begin(), v.size()) {}
 
   // Accessors
@@ -274,8 +296,7 @@
   //
   // Returns a reference to the i'th element of this span.
   constexpr reference operator[](size_type i) const noexcept {
-    // MSVC 2015 accepts this as constexpr, but not ptr_[i]
-    return ABSL_HARDENING_ASSERT(i < size()), *(data() + i);
+    return ABSL_HARDENING_ASSERT(i < size()), ptr_[i];
   }
 
   // Span::at()
@@ -398,7 +419,7 @@
   //   absl::MakeSpan(vec).subspan(5);     // throws std::out_of_range
   constexpr Span subspan(size_type pos = 0, size_type len = npos) const {
     return (pos <= size())
-               ? Span(data() + pos, span_internal::Min(size() - pos, len))
+               ? Span(data() + pos, (std::min)(size() - pos, len))
                : (base_internal::ThrowStdOutOfRange("pos > size()"), Span());
   }
 
@@ -664,7 +685,8 @@
 
 template <int&... ExplicitArgumentBarrier, typename T>
 Span<T> MakeSpan(T* begin, T* end) noexcept {
-  return ABSL_HARDENING_ASSERT(begin <= end), Span<T>(begin, end - begin);
+  return ABSL_HARDENING_ASSERT(begin <= end),
+         Span<T>(begin, static_cast<size_t>(end - begin));
 }
 
 template <int&... ExplicitArgumentBarrier, typename C>
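For readers skimming the patch: a minimal, illustrative sketch (not part of the diff; names are made up) of what the newly added `element_type` alias and the read-only implicit constructor give callers.

// Illustrative only -- not part of the patch.
#include <type_traits>
#include <vector>
#include "absl/types/span.h"

void ElementTypeSketch() {
  std::vector<int> v = {1, 2, 3};
  absl::Span<const int> s(v);  // implicit read-only view over the vector
  static_assert(
      std::is_same<absl::Span<const int>::element_type, const int>::value,
      "element_type keeps the cv-qualifier, matching std::span");
  static_assert(std::is_same<absl::Span<const int>::value_type, int>::value,
                "value_type strips cv-qualifiers, as before");
  (void)s;
}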
diff --git a/abseil-cpp/absl/types/span_test.cc b/abseil-cpp/absl/types/span_test.cc
index 2584339..29e8681 100644
--- a/abseil-cpp/absl/types/span_test.cc
+++ b/abseil-cpp/absl/types/span_test.cc
@@ -191,7 +191,7 @@
 }
 
 void TestInitializerList(absl::Span<const int> s, const std::vector<int>& v) {
-  EXPECT_TRUE(absl::equal(s.begin(), s.end(), v.begin(), v.end()));
+  EXPECT_TRUE(std::equal(s.begin(), s.end(), v.begin(), v.end()));
 }
 
 TEST(ConstIntSpan, InitializerListConversion) {
@@ -661,6 +661,8 @@
   CheckType<absl::Span<int>::const_reverse_iterator>(slice.crend());
   testing::StaticAssertTypeEq<int, absl::Span<int>::value_type>();
   testing::StaticAssertTypeEq<int, absl::Span<const int>::value_type>();
+  testing::StaticAssertTypeEq<int, absl::Span<int>::element_type>();
+  testing::StaticAssertTypeEq<const int, absl::Span<const int>::element_type>();
   testing::StaticAssertTypeEq<int*, absl::Span<int>::pointer>();
   testing::StaticAssertTypeEq<const int*, absl::Span<const int>::pointer>();
   testing::StaticAssertTypeEq<int&, absl::Span<int>::reference>();
diff --git a/abseil-cpp/absl/types/variant.h b/abseil-cpp/absl/types/variant.h
index 776d19a..ac93464 100644
--- a/abseil-cpp/absl/types/variant.h
+++ b/abseil-cpp/absl/types/variant.h
@@ -604,7 +604,10 @@
 
   // emplace() Functions
 
-  // Constructs a value of the given alternative type T within the variant.
+  // Constructs a value of the given alternative type T within the variant. The
+  // existing value of the variant is destroyed first (provided that
+  // `absl::valueless_by_exception()` is false). Requires that T is unambiguous
+  // in the variant.
   //
   // Example:
   //
@@ -624,7 +627,9 @@
   }
 
   // Constructs a value of the given alternative type T within the variant using
-  // an initializer list.
+  // an initializer list. The existing value of the variant is destroyed first
+  // (provided that `absl::valueless_by_exception()` is false). Requires that T
+  // is unambiguous in the variant.
   //
   // Example:
   //
@@ -643,7 +648,7 @@
   }
 
   // Destroys the current value of the variant (provided that
-  // `absl::valueless_by_exception()` is false, and constructs a new value at
+  // `absl::valueless_by_exception()` is false) and constructs a new value at
   // the given index.
   //
   // Example:
@@ -662,7 +667,7 @@
   }
 
   // Destroys the current value of the variant (provided that
-  // `absl::valueless_by_exception()` is false, and constructs a new value at
+  // `absl::valueless_by_exception()` is false) and constructs a new value at
   // the given index using an initializer list and the provided arguments.
   //
   // Example:
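An illustrative sketch (not part of the diff) of the emplace() behavior the corrected comments describe: the currently held alternative is destroyed first, then the new value is constructed in place, selected either by type or by index.

// Illustrative only -- not part of the patch.
#include <string>
#include "absl/types/variant.h"

void EmplaceSketch() {
  absl::variant<int, std::string> v = 42;
  v.emplace<std::string>(3, 'x');  // by type: the int is destroyed, v holds "xxx"
  v.emplace<1>({'a', 'b', 'c'});   // by index with an initializer list: v holds "abc"
  v.emplace<0>(7);                 // by index: v holds 7 again
}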
diff --git a/abseil-cpp/absl/types/variant_test.cc b/abseil-cpp/absl/types/variant_test.cc
index cf8f7f3..4cd5b7a 100644
--- a/abseil-cpp/absl/types/variant_test.cc
+++ b/abseil-cpp/absl/types/variant_test.cc
@@ -281,7 +281,7 @@
     using X = variant<int>;
     constexpr variant<int> x{};
     ASSERT_FALSE(x.valueless_by_exception());
-    ASSERT_EQ(0, x.index());
+    ASSERT_EQ(0u, x.index());
     EXPECT_EQ(0, absl::get<0>(x));
     EXPECT_TRUE(std::is_nothrow_default_constructible<X>::value);
   }
@@ -290,7 +290,7 @@
     using X = variant<NonNoexceptDefaultConstructible>;
     X x{};
     ASSERT_FALSE(x.valueless_by_exception());
-    ASSERT_EQ(0, x.index());
+    ASSERT_EQ(0u, x.index());
     EXPECT_EQ(5, absl::get<0>(x).value);
     EXPECT_FALSE(std::is_nothrow_default_constructible<X>::value);
   }
@@ -299,7 +299,7 @@
     using X = variant<int, NonNoexceptDefaultConstructible>;
     X x{};
     ASSERT_FALSE(x.valueless_by_exception());
-    ASSERT_EQ(0, x.index());
+    ASSERT_EQ(0u, x.index());
     EXPECT_EQ(0, absl::get<0>(x));
     EXPECT_TRUE(std::is_nothrow_default_constructible<X>::value);
   }
@@ -308,7 +308,7 @@
     using X = variant<NonNoexceptDefaultConstructible, int>;
     X x{};
     ASSERT_FALSE(x.valueless_by_exception());
-    ASSERT_EQ(0, x.index());
+    ASSERT_EQ(0u, x.index());
     EXPECT_EQ(5, absl::get<0>(x).value);
     EXPECT_FALSE(std::is_nothrow_default_constructible<X>::value);
   }
@@ -480,7 +480,7 @@
   ASSERT_TRUE(absl::holds_alternative<std::string>(v2));
   EXPECT_EQ("ABC", absl::get<std::string>(v2));
 
-  Var v3(in_place_type_t<std::string>(), "ABC", 2);
+  Var v3(in_place_type_t<std::string>(), "ABC", 2u);
   ASSERT_TRUE(absl::holds_alternative<std::string>(v3));
   EXPECT_EQ("AB", absl::get<std::string>(v3));
 
@@ -503,7 +503,7 @@
   ASSERT_TRUE(absl::holds_alternative<std::string>(v2));
   EXPECT_EQ("ABC", absl::get<std::string>(v2));
 
-  Var v3(in_place_type<std::string>, "ABC", 2);
+  Var v3(in_place_type<std::string>, "ABC", 2u);
   ASSERT_TRUE(absl::holds_alternative<std::string>(v3));
   EXPECT_EQ("AB", absl::get<std::string>(v3));
 
@@ -544,7 +544,7 @@
   ASSERT_TRUE(absl::holds_alternative<std::string>(v2));
   EXPECT_EQ("ABC", absl::get<std::string>(v2));
 
-  Var v3(in_place_index_t<1>(), "ABC", 2);
+  Var v3(in_place_index_t<1>(), "ABC", 2u);
   ASSERT_TRUE(absl::holds_alternative<std::string>(v3));
   EXPECT_EQ("AB", absl::get<std::string>(v3));
 
@@ -571,7 +571,7 @@
   ASSERT_TRUE(absl::holds_alternative<std::string>(v2));
   EXPECT_EQ("ABC", absl::get<std::string>(v2));
 
-  Var v3(in_place_index<1>, "ABC", 2);
+  Var v3(in_place_index<1>, "ABC", 2u);
   ASSERT_TRUE(absl::holds_alternative<std::string>(v3));
   EXPECT_EQ("AB", absl::get<std::string>(v3));
 
@@ -688,11 +688,11 @@
   EXPECT_EQ(long_str, foo);
 
   variant<int, std::string> so = long_str;
-  ASSERT_EQ(1, so.index());
+  ASSERT_EQ(1u, so.index());
   EXPECT_EQ(long_str, absl::get<1>(so));
   so = *&so;
 
-  ASSERT_EQ(1, so.index());
+  ASSERT_EQ(1u, so.index());
   EXPECT_EQ(long_str, absl::get<1>(so));
 }
 
@@ -968,16 +968,16 @@
   using Var = variant<int, std::string, double>;
 
   Var v = 1;
-  EXPECT_EQ(0, v.index());
+  EXPECT_EQ(0u, v.index());
   v = "str";
-  EXPECT_EQ(1, v.index());
+  EXPECT_EQ(1u, v.index());
   v = 0.;
-  EXPECT_EQ(2, v.index());
+  EXPECT_EQ(2u, v.index());
 
   Var v2 = v;
-  EXPECT_EQ(2, v2.index());
+  EXPECT_EQ(2u, v2.index());
   v2.emplace<int>(3);
-  EXPECT_EQ(0, v2.index());
+  EXPECT_EQ(0u, v2.index());
 }
 
 TEST(VariantTest, NotValuelessByException) {
@@ -1002,11 +1002,11 @@
   using Var = variant<MoveCanThrow, std::string, double>;
 
   Var v(absl::in_place_index<0>);
-  EXPECT_EQ(0, v.index());
+  EXPECT_EQ(0u, v.index());
   ToValuelessByException(v);
   EXPECT_EQ(absl::variant_npos, v.index());
   v = "str";
-  EXPECT_EQ(1, v.index());
+  EXPECT_EQ(1u, v.index());
 }
 
 TEST(VariantTest, ValuelessByException) {
@@ -1084,18 +1084,18 @@
 TEST(VariantTest, VariantSize) {
   {
     using Size1Variant = absl::variant<int>;
-    EXPECT_EQ(1, absl::variant_size<Size1Variant>::value);
-    EXPECT_EQ(1, absl::variant_size<const Size1Variant>::value);
-    EXPECT_EQ(1, absl::variant_size<volatile Size1Variant>::value);
-    EXPECT_EQ(1, absl::variant_size<const volatile Size1Variant>::value);
+    EXPECT_EQ(1u, absl::variant_size<Size1Variant>::value);
+    EXPECT_EQ(1u, absl::variant_size<const Size1Variant>::value);
+    EXPECT_EQ(1u, absl::variant_size<volatile Size1Variant>::value);
+    EXPECT_EQ(1u, absl::variant_size<const volatile Size1Variant>::value);
   }
 
   {
     using Size3Variant = absl::variant<int, float, int>;
-    EXPECT_EQ(3, absl::variant_size<Size3Variant>::value);
-    EXPECT_EQ(3, absl::variant_size<const Size3Variant>::value);
-    EXPECT_EQ(3, absl::variant_size<volatile Size3Variant>::value);
-    EXPECT_EQ(3, absl::variant_size<const volatile Size3Variant>::value);
+    EXPECT_EQ(3u, absl::variant_size<Size3Variant>::value);
+    EXPECT_EQ(3u, absl::variant_size<const Size3Variant>::value);
+    EXPECT_EQ(3u, absl::variant_size<volatile Size3Variant>::value);
+    EXPECT_EQ(3u, absl::variant_size<const volatile Size3Variant>::value);
   }
 }
 
@@ -1799,14 +1799,14 @@
   EXPECT_EQ("B", piece);
 
   struct StrLen {
-    int operator()(const char* s) const { return strlen(s); }
-    int operator()(const std::string& s) const { return s.size(); }
+    size_t operator()(const char* s) const { return strlen(s); }
+    size_t operator()(const std::string& s) const { return s.size(); }
   };
 
   v = "SomeStr";
-  EXPECT_EQ(7, absl::visit(StrLen{}, v));
+  EXPECT_EQ(7u, absl::visit(StrLen{}, v));
   v = std::string("VeryLargeThisTime");
-  EXPECT_EQ(17, absl::visit(StrLen{}, v));
+  EXPECT_EQ(17u, absl::visit(StrLen{}, v));
 }
 
 TEST(VariantTest, VisitRValue) {
@@ -1979,7 +1979,7 @@
 
 TEST(VariantTest, VariantMonostateDefaultConstruction) {
   absl::variant<absl::monostate, NonDefaultConstructible> var;
-  EXPECT_EQ(var.index(), 0);
+  EXPECT_EQ(var.index(), 0u);
 }
 
 ////////////////////////////////
@@ -2100,7 +2100,7 @@
     for (int i = 0; i < 100; ++i) {
       hashcodes.insert(hash(i));
     }
-    EXPECT_GT(hashcodes.size(), 90);
+    EXPECT_GT(hashcodes.size(), 90u);
 
     // test const-qualified
     static_assert(type_traits_internal::IsHashable<variant<const int>>::value,
@@ -2311,9 +2311,10 @@
   ASSERT_TRUE(absl::holds_alternative<int32_t>(variant2));
   EXPECT_EQ(42, absl::get<int32_t>(variant2));
 
-  variant2 = ConvertVariantTo<variant<int32_t, uint32_t>>(variant<uint32_t>(42));
+  variant2 =
+      ConvertVariantTo<variant<int32_t, uint32_t>>(variant<uint32_t>(42u));
   ASSERT_TRUE(absl::holds_alternative<uint32_t>(variant2));
-  EXPECT_EQ(42, absl::get<uint32_t>(variant2));
+  EXPECT_EQ(42u, absl::get<uint32_t>(variant2));
 #endif  // !ABSL_USES_STD_VARIANT
 
   variant<Convertible1, Convertible2> variant3(
@@ -2360,10 +2361,10 @@
   ASSERT_TRUE(absl::holds_alternative<int32_t>(variant2));
   EXPECT_EQ(42, absl::get<int32_t>(variant2));
 
-  variant<uint32_t> source6(42);
+  variant<uint32_t> source6(42u);
   variant2 = ConvertVariantTo<variant<int32_t, uint32_t>>(source6);
   ASSERT_TRUE(absl::holds_alternative<uint32_t>(variant2));
-  EXPECT_EQ(42, absl::get<uint32_t>(variant2));
+  EXPECT_EQ(42u, absl::get<uint32_t>(variant2));
 #endif
 
   variant<Convertible2, Convertible1> source7((Convertible1()));
@@ -2453,8 +2454,9 @@
       ConvertVariantTo<variant<int32_t, uint32_t>>(variant<int32_t>(42)));
   EXPECT_THAT(absl::get_if<int32_t>(&variant2), Pointee(42));
 
-  variant2 = ConvertVariantTo<variant<int32_t, uint32_t>>(variant<uint32_t>(42));
-  EXPECT_THAT(absl::get_if<uint32_t>(&variant2), Pointee(42));
+  variant2 =
+      ConvertVariantTo<variant<int32_t, uint32_t>>(variant<uint32_t>(42u));
+  EXPECT_THAT(absl::get_if<uint32_t>(&variant2), Pointee(42u));
 #endif
 
   variant<Convertible1, Convertible2> variant3(
@@ -2497,9 +2499,9 @@
       ConvertVariantTo<variant<int32_t, uint32_t>>(source5));
   EXPECT_THAT(absl::get_if<int32_t>(&variant2), Pointee(42));
 
-  variant<uint32_t> source6(42);
+  variant<uint32_t> source6(42u);
   variant2 = ConvertVariantTo<variant<int32_t, uint32_t>>(source6);
-  EXPECT_THAT(absl::get_if<uint32_t>(&variant2), Pointee(42));
+  EXPECT_THAT(absl::get_if<uint32_t>(&variant2), Pointee(42u));
 #endif  // !ABSL_USES_STD_VARIANT
 
   variant<Convertible2, Convertible1> source7((Convertible1()));
@@ -2568,7 +2570,7 @@
   vec.reserve(3);
   auto another_vec = absl::move(vec);
   // As a sanity check, verify vector contents.
-  ASSERT_EQ(2, another_vec.size());
+  ASSERT_EQ(2u, another_vec.size());
   EXPECT_EQ(42, *absl::get<std::unique_ptr<int>>(another_vec[0]));
   EXPECT_EQ("Hello", absl::get<std::string>(another_vec[1]));
 }
diff --git a/abseil-cpp/absl/utility/BUILD.bazel b/abseil-cpp/absl/utility/BUILD.bazel
index 02b2c40..061f4c5 100644
--- a/abseil-cpp/absl/utility/BUILD.bazel
+++ b/abseil-cpp/absl/utility/BUILD.bazel
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -53,3 +52,26 @@
         "@com_google_googletest//:gtest_main",
     ],
 )
+
+cc_library(
+    name = "if_constexpr",
+    hdrs = [
+        "internal/if_constexpr.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+    ],
+)
+
+cc_test(
+    name = "if_constexpr_test",
+    srcs = ["internal/if_constexpr_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":if_constexpr",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
diff --git a/abseil-cpp/absl/utility/CMakeLists.txt b/abseil-cpp/absl/utility/CMakeLists.txt
index e1edd19..27ee0de 100644
--- a/abseil-cpp/absl/utility/CMakeLists.txt
+++ b/abseil-cpp/absl/utility/CMakeLists.txt
@@ -40,5 +40,29 @@
     absl::core_headers
     absl::memory
     absl::strings
-    gmock_main
+    GTest::gmock_main
+)
+
+absl_cc_library(
+  NAME
+    if_constexpr
+  HDRS
+    "internal/if_constexpr.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    if_constexpr_test
+  SRCS
+    "internal/if_constexpr_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::if_constexpr
+    GTest::gmock_main
 )
diff --git a/abseil-cpp/absl/utility/internal/if_constexpr.h b/abseil-cpp/absl/utility/internal/if_constexpr.h
new file mode 100644
index 0000000..7a26311
--- /dev/null
+++ b/abseil-cpp/absl/utility/internal/if_constexpr.h
@@ -0,0 +1,70 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The IfConstexpr and IfConstexprElse utilities in this file are meant to be
+// used to emulate `if constexpr` in pre-C++17 mode in library implementation.
+// The motivation is to allow for avoiding complex SFINAE.
+//
+// The functions passed in must depend on the type(s) of the object(s) that
+// require SFINAE. For example:
+// template<typename T>
+// int MaybeFoo(T& t) {
+//   if constexpr (HasFoo<T>::value) return t.foo();
+//   return 0;
+// }
+//
+// can be written in pre-C++17 as:
+//
+// template<typename T>
+// int MaybeFoo(T& t) {
+//   int i = 0;
+//   absl::utility_internal::IfConstexpr<HasFoo<T>::value>(
+//       [&](const auto& fooer) { i = fooer.foo(); }, t);
+//   return i;
+// }
+
+#ifndef ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
+#define ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
+
+#include <tuple>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace utility_internal {
+
+template <bool condition, typename TrueFunc, typename FalseFunc,
+          typename... Args>
+auto IfConstexprElse(TrueFunc&& true_func, FalseFunc&& false_func,
+                     Args&&... args) {
+  return std::get<condition>(std::forward_as_tuple(
+      std::forward<FalseFunc>(false_func), std::forward<TrueFunc>(true_func)))(
+      std::forward<Args>(args)...);
+}
+
+template <bool condition, typename Func, typename... Args>
+void IfConstexpr(Func&& func, Args&&... args) {
+  IfConstexprElse<condition>(std::forward<Func>(func), [](auto&&...){},
+                             std::forward<Args>(args)...);
+}
+
+}  // namespace utility_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
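A hedged usage sketch (not part of the diff; the type and function names are invented) showing how IfConstexprElse selects one of two callables at compile time, so the rejected generic lambda is never instantiated for a type that lacks the member.

// Illustrative only -- not part of the patch.
#include "absl/utility/internal/if_constexpr.h"

struct Plain {};                                 // has no foo()
struct Fancy { int foo() const { return 7; } };

template <bool kHasFoo, typename T>
int MaybeFoo(const T& t) {
  // Only the selected lambda is ever called, so its body is only
  // instantiated for types that actually provide foo().
  return absl::utility_internal::IfConstexprElse<kHasFoo>(
      [](const auto& x) { return x.foo(); },   // true branch
      [](const auto&) { return 0; },           // false branch
      t);
}
// MaybeFoo<true>(Fancy{}) == 7; MaybeFoo<false>(Plain{}) == 0.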
diff --git a/abseil-cpp/absl/utility/internal/if_constexpr_test.cc b/abseil-cpp/absl/utility/internal/if_constexpr_test.cc
new file mode 100644
index 0000000..d1ee723
--- /dev/null
+++ b/abseil-cpp/absl/utility/internal/if_constexpr_test.cc
@@ -0,0 +1,79 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/utility/internal/if_constexpr.h"
+
+#include <utility>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+struct Empty {};
+struct HasFoo {
+  int foo() const { return 1; }
+};
+
+TEST(IfConstexpr, Basic) {
+  int i = 0;
+  absl::utility_internal::IfConstexpr<false>(
+      [&](const auto& t) { i = t.foo(); }, Empty{});
+  EXPECT_EQ(i, 0);
+
+  absl::utility_internal::IfConstexpr<false>(
+      [&](const auto& t) { i = t.foo(); }, HasFoo{});
+  EXPECT_EQ(i, 0);
+
+  absl::utility_internal::IfConstexpr<true>(
+      [&](const auto& t) { i = t.foo(); }, HasFoo{});
+  EXPECT_EQ(i, 1);
+}
+
+TEST(IfConstexprElse, Basic) {
+  EXPECT_EQ(absl::utility_internal::IfConstexprElse<false>(
+      [&](const auto& t) { return t.foo(); }, [&](const auto&) { return 2; },
+      Empty{}), 2);
+
+  EXPECT_EQ(absl::utility_internal::IfConstexprElse<false>(
+      [&](const auto& t) { return t.foo(); }, [&](const auto&) { return 2; },
+      HasFoo{}), 2);
+
+  EXPECT_EQ(absl::utility_internal::IfConstexprElse<true>(
+      [&](const auto& t) { return t.foo(); }, [&](const auto&) { return 2; },
+      HasFoo{}), 1);
+}
+
+struct HasFooRValue {
+  int foo() && { return 1; }
+};
+struct RValueFunc {
+  void operator()(HasFooRValue&& t) && { *i = std::move(t).foo(); }
+
+  int* i = nullptr;
+};
+
+TEST(IfConstexpr, RValues) {
+  int i = 0;
+  RValueFunc func = {&i};
+  absl::utility_internal::IfConstexpr<false>(
+      std::move(func), HasFooRValue{});
+  EXPECT_EQ(i, 0);
+
+  func = RValueFunc{&i};
+  absl::utility_internal::IfConstexpr<true>(
+      std::move(func), HasFooRValue{});
+  EXPECT_EQ(i, 1);
+}
+
+}  // namespace
diff --git a/abseil-cpp/absl/utility/utility_test.cc b/abseil-cpp/absl/utility/utility_test.cc
index f044ad6..2f0509a 100644
--- a/abseil-cpp/absl/utility/utility_test.cc
+++ b/abseil-cpp/absl/utility/utility_test.cc
@@ -1,4 +1,4 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2022 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,10 +14,12 @@
 
 #include "absl/utility/utility.h"
 
+#include <memory>
 #include <sstream>
 #include <string>
 #include <tuple>
 #include <type_traits>
+#include <utility>
 #include <vector>
 
 #include "gmock/gmock.h"
@@ -35,10 +37,10 @@
 // Both the unused variables and the name length warnings are due to calls
 // to absl::make_index_sequence with very large values, creating very long type
 // names. The resulting warnings are so long they make build output unreadable.
-#pragma warning( push )
-#pragma warning( disable : 4503 )  // decorated name length exceeded
-#pragma warning( disable : 4101 )  // unreferenced local variable
-#endif  // _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4503)  // decorated name length exceeded
+#pragma warning(disable : 4101)  // unreferenced local variable
+#endif                           // _MSC_VER
 
 using ::testing::ElementsAre;
 using ::testing::Pointee;
@@ -227,8 +229,7 @@
 }
 
 TEST(ApplyTest, NonCopyableResult) {
-  EXPECT_THAT(absl::apply(Factory, std::make_tuple(42)),
-              ::testing::Pointee(42));
+  EXPECT_THAT(absl::apply(Factory, std::make_tuple(42)), Pointee(42));
 }
 
 TEST(ApplyTest, VoidResult) { absl::apply(NoOp, std::tuple<>()); }
@@ -373,4 +374,3 @@
 }
 
 }  // namespace
-
diff --git a/abseil-cpp/ci/absl_alternate_options.h b/abseil-cpp/ci/absl_alternate_options.h
index 29b020d..82d2ecf 100644
--- a/abseil-cpp/ci/absl_alternate_options.h
+++ b/abseil-cpp/ci/absl_alternate_options.h
@@ -15,8 +15,8 @@
 // Alternate options.h file, used in continuous integration testing to exercise
 // option settings not used by default.
 
-#ifndef ABSL_BASE_OPTIONS_H_
-#define ABSL_BASE_OPTIONS_H_
+#ifndef ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_
+#define ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_
 
 #define ABSL_OPTION_USE_STD_ANY 0
 #define ABSL_OPTION_USE_STD_OPTIONAL 0
@@ -26,4 +26,4 @@
 #define ABSL_OPTION_INLINE_NAMESPACE_NAME ns
 #define ABSL_OPTION_HARDENED 1
 
-#endif  // ABSL_BASE_OPTIONS_H_
+#endif  // ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_
diff --git a/abseil-cpp/ci/cmake_common.sh b/abseil-cpp/ci/cmake_common.sh
new file mode 100644
index 0000000..051b70d
--- /dev/null
+++ b/abseil-cpp/ci/cmake_common.sh
@@ -0,0 +1,25 @@
+# Copyright 2020 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The commit of GoogleTest to be used in the CMake tests in this directory.
+# Keep this in sync with the commit in the WORKSPACE file.
+readonly ABSL_GOOGLETEST_COMMIT="f8d7d77c06936315286eb55f8de22cd23c188571"  # v1.14.0
+
+# Avoid depending on GitHub by looking for a cached copy of the commit first.
+if [[ -r "${KOKORO_GFILE_DIR:-}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
+  ABSL_GOOGLETEST_DOWNLOAD_URL="file:///distdir/${ABSL_GOOGLETEST_COMMIT}.zip"
+else
+  ABSL_GOOGLETEST_DOWNLOAD_URL="https://github.com/google/googletest/archive/${ABSL_GOOGLETEST_COMMIT}.zip"
+fi
diff --git a/abseil-cpp/ci/cmake_install_test.sh b/abseil-cpp/ci/cmake_install_test.sh
index b31e4b8..ab3b86f 100755
--- a/abseil-cpp/ci/cmake_install_test.sh
+++ b/abseil-cpp/ci/cmake_install_test.sh
@@ -20,16 +20,39 @@
   ABSEIL_ROOT="$(realpath $(dirname ${0})/..)"
 fi
 
+if [[ -z ${LINK_TYPE:-} ]]; then
+  LINK_TYPE="STATIC DYNAMIC"
+fi
+
+source "${ABSEIL_ROOT}/ci/cmake_common.sh"
+
 source "${ABSEIL_ROOT}/ci/linux_docker_containers.sh"
 readonly DOCKER_CONTAINER=${LINUX_GCC_LATEST_CONTAINER}
 
+# Verify that everything works with the standard "cmake && make && make install"
+# without building tests or requiring GoogleTest.
 time docker run \
-    --volume="${ABSEIL_ROOT}:/abseil-cpp:ro" \
-    --workdir=/abseil-cpp \
+    --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp-ro,readonly \
     --tmpfs=/buildfs:exec \
-    --cap-add=SYS_PTRACE \
+    --workdir=/buildfs \
     --rm \
-    -e CFLAGS="-Werror" \
-    -e CXXFLAGS="-Werror" \
+    ${DOCKER_EXTRA_ARGS:-} \
     ${DOCKER_CONTAINER} \
-    /bin/bash CMake/install_test_project/test.sh $@
+    /bin/bash -c "cmake /abseil-cpp-ro && make -j$(nproc) && make install"
+
+# Verify that a more complicated project works.
+for link_type in ${LINK_TYPE}; do
+  time docker run \
+    --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp-ro,readonly \
+    --tmpfs=/buildfs:exec \
+    --tmpfs=/abseil-cpp:exec \
+    --workdir=/abseil-cpp \
+    --cap-add=SYS_PTRACE \
+    -e "ABSL_GOOGLETEST_COMMIT=${ABSL_GOOGLETEST_COMMIT}" \
+    -e "ABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL}" \
+    -e "LINK_TYPE=${link_type}" \
+    --rm \
+    ${DOCKER_EXTRA_ARGS:-} \
+    ${DOCKER_CONTAINER} \
+    /bin/bash -c "cp -r /abseil-cpp-ro/* . && CMake/install_test_project/test.sh"
+done
diff --git a/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh b/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh
index 2aed43c..f9c146b 100755
--- a/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh
+++ b/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh
@@ -25,7 +25,7 @@
 fi
 
 if [[ -z ${STD:-} ]]; then
-  STD="c++11 c++14 c++17 c++20"
+  STD="c++14 c++17 c++20"
 fi
 
 if [[ -z ${COMPILATION_MODE:-} ]]; then
@@ -42,7 +42,7 @@
 # USE_BAZEL_CACHE=1 only works on Kokoro.
 # Without access to the credentials this won't work.
 if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_KEYSTORE_DIR}:/keystore:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_KEYSTORE_DIR},target=/keystore,readonly ${DOCKER_EXTRA_ARGS:-}"
   # Bazel doesn't track changes to tools outside of the workspace
   # (e.g. /usr/bin/gcc), so by appending the docker container to the
   # remote_http_cache url, we make changes to the container part of
@@ -55,7 +55,7 @@
 # external dependencies first.
 # https://docs.bazel.build/versions/master/guide.html#distdir
 if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_GFILE_DIR}/distdir:/distdir:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
   BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
 fi
 
@@ -64,26 +64,28 @@
     for exceptions_mode in ${EXCEPTIONS_MODE}; do
       echo "--------------------------------------------------------------------"
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp:ro" \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
         --workdir=/abseil-cpp \
         --cap-add=SYS_PTRACE \
         --rm \
         -e CC="/opt/llvm/clang/bin/clang" \
-        -e BAZEL_COMPILER="llvm" \
         -e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \
-        -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \
-        -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/c++/v1" \
+        -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu" \
+        -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx/include/c++/v1" \
         ${DOCKER_EXTRA_ARGS:-} \
         ${DOCKER_CONTAINER} \
         /usr/local/bin/bazel test ... \
           --compilation_mode="${compilation_mode}" \
           --copt="${exceptions_mode}" \
+          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
           --copt="-fsanitize=address" \
           --copt="-fsanitize=float-divide-by-zero" \
           --copt="-fsanitize=nullability" \
           --copt="-fsanitize=undefined" \
           --copt="-fno-sanitize-blacklist" \
           --copt=-Werror \
+          --distdir="/bazel-distdir" \
+          --features=external_include_paths \
           --keep_going \
           --linkopt="-fsanitize=address" \
           --linkopt="-fsanitize-link-c++-runtime" \
diff --git a/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh b/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh
index eb04e69..38b2d74 100755
--- a/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh
+++ b/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh
@@ -25,7 +25,7 @@
 fi
 
 if [[ -z ${STD:-} ]]; then
-  STD="c++11 c++14 c++17 c++20"
+  STD="c++14 c++17 c++20"
 fi
 
 if [[ -z ${COMPILATION_MODE:-} ]]; then
@@ -42,7 +42,7 @@
 # USE_BAZEL_CACHE=1 only works on Kokoro.
 # Without access to the credentials this won't work.
 if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_KEYSTORE_DIR}:/keystore:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_KEYSTORE_DIR},target=/keystore,readonly ${DOCKER_EXTRA_ARGS:-}"
   # Bazel doesn't track changes to tools outside of the workspace
   # (e.g. /usr/bin/gcc), so by appending the docker container to the
   # remote_http_cache url, we make changes to the container part of
@@ -55,7 +55,7 @@
 # external dependencies first.
 # https://docs.bazel.build/versions/master/guide.html#distdir
 if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_GFILE_DIR}/distdir:/distdir:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
   BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
 fi
 
@@ -64,16 +64,15 @@
     for exceptions_mode in ${EXCEPTIONS_MODE}; do
       echo "--------------------------------------------------------------------"
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp-ro:ro" \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp-ro,readonly \
         --tmpfs=/abseil-cpp \
         --workdir=/abseil-cpp \
         --cap-add=SYS_PTRACE \
         --rm \
         -e CC="/opt/llvm/clang/bin/clang" \
-        -e BAZEL_COMPILER="llvm" \
         -e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \
-        -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \
-        -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/c++/v1" \
+        -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu" \
+        -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx/include/c++/v1" \
         ${DOCKER_EXTRA_ARGS:-} \
         ${DOCKER_CONTAINER} \
         /bin/sh -c "
@@ -84,8 +83,11 @@
           /usr/local/bin/bazel test ... \
             --compilation_mode=\"${compilation_mode}\" \
             --copt=\"${exceptions_mode}\" \
+            --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
             --copt=-Werror \
             --define=\"absl=1\" \
+            --distdir=\"/bazel-distdir\" \
+            --features=external_include_paths \
             --keep_going \
             --show_timestamps \
             --test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
diff --git a/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh b/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh
index b39eaf7..34d7940 100755
--- a/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh
+++ b/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh
@@ -25,7 +25,7 @@
 fi
 
 if [[ -z ${STD:-} ]]; then
-  STD="c++11 c++14 c++17 c++20"
+  STD="c++14 c++17 c++20"
 fi
 
 if [[ -z ${COMPILATION_MODE:-} ]]; then
@@ -42,7 +42,7 @@
 # USE_BAZEL_CACHE=1 only works on Kokoro.
 # Without access to the credentials this won't work.
 if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_KEYSTORE_DIR}:/keystore:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_KEYSTORE_DIR},target=/keystore,readonly ${DOCKER_EXTRA_ARGS:-}"
   # Bazel doesn't track changes to tools outside of the workspace
   # (e.g. /usr/bin/gcc), so by appending the docker container to the
   # remote_http_cache url, we make changes to the container part of
@@ -55,7 +55,7 @@
 # external dependencies first.
 # https://docs.bazel.build/versions/master/guide.html#distdir
 if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_GFILE_DIR}/distdir:/distdir:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
   BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
 fi
 
@@ -64,28 +64,29 @@
     for exceptions_mode in ${EXCEPTIONS_MODE}; do
       echo "--------------------------------------------------------------------"
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp:ro" \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
         --workdir=/abseil-cpp \
         --cap-add=SYS_PTRACE \
         --rm \
         -e CC="/opt/llvm/clang/bin/clang" \
-        -e BAZEL_COMPILER="llvm" \
         -e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \
-        -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx-tsan/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib" \
-        -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx-tsan/include/c++/v1" \
+        -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx-tsan/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib/x86_64-unknown-linux-gnu" \
+        -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx-tsan/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx-tsan/include/c++/v1" \
         ${DOCKER_EXTRA_ARGS:-} \
         ${DOCKER_CONTAINER} \
         /usr/local/bin/bazel test ... \
           --build_tag_filters="-notsan" \
           --compilation_mode="${compilation_mode}" \
           --copt="${exceptions_mode}" \
+          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
           --copt="-fsanitize=thread" \
           --copt="-fno-sanitize-blacklist" \
           --copt=-Werror \
+          --distdir="/bazel-distdir" \
+          --features=external_include_paths \
           --keep_going \
           --linkopt="-fsanitize=thread" \
           --show_timestamps \
-          --test_env="TSAN_OPTIONS=report_atomic_races=0" \
           --test_env="TSAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer" \
           --test_env="TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo" \
           --test_output=errors \
diff --git a/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh b/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh
index 4e49067..13d56fc 100755
--- a/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh
+++ b/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh
@@ -25,7 +25,7 @@
 fi
 
 if [[ -z ${STD:-} ]]; then
-  STD="c++11 c++14 c++17 c++20"
+  STD="c++14 c++17"
 fi
 
 if [[ -z ${COMPILATION_MODE:-} ]]; then
@@ -42,7 +42,7 @@
 # USE_BAZEL_CACHE=1 only works on Kokoro.
 # Without access to the credentials this won't work.
 if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_KEYSTORE_DIR}:/keystore:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_KEYSTORE_DIR},target=/keystore,readonly ${DOCKER_EXTRA_ARGS:-}"
   # Bazel doesn't track changes to tools outside of the workspace
   # (e.g. /usr/bin/gcc), so by appending the docker container to the
   # remote_http_cache url, we make changes to the container part of
@@ -55,7 +55,7 @@
 # external dependencies first.
 # https://docs.bazel.build/versions/master/guide.html#distdir
 if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_GFILE_DIR}/distdir:/distdir:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
   BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
 fi
 
@@ -64,21 +64,24 @@
     for exceptions_mode in ${EXCEPTIONS_MODE}; do
       echo "--------------------------------------------------------------------"
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp:ro" \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
         --workdir=/abseil-cpp \
         --cap-add=SYS_PTRACE \
         --rm \
         -e CC="/opt/llvm/clang/bin/clang" \
-        -e BAZEL_COMPILER="llvm" \
         -e BAZEL_CXXOPTS="-std=${std}" \
         ${DOCKER_EXTRA_ARGS:-} \
         ${DOCKER_CONTAINER} \
         /usr/local/bin/bazel test ... \
           --compilation_mode="${compilation_mode}" \
           --copt="--gcc-toolchain=/usr/local" \
+          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
           --copt="${exceptions_mode}" \
+          --copt="-march=haswell" \
           --copt=-Werror \
           --define="absl=1" \
+          --distdir="/bazel-distdir" \
+          --features=external_include_paths \
           --keep_going \
           --linkopt="--gcc-toolchain=/usr/local" \
           --show_timestamps \
diff --git a/abseil-cpp/ci/linux_docker_containers.sh b/abseil-cpp/ci/linux_docker_containers.sh
index e42fa58..a07c64c 100644
--- a/abseil-cpp/ci/linux_docker_containers.sh
+++ b/abseil-cpp/ci/linux_docker_containers.sh
@@ -15,7 +15,7 @@
 # The file contains Docker container identifiers currently used by test scripts.
 # Test scripts should source this file to get the identifiers.
 
-readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20191016"
-readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20200909"
-readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20200909"
-readonly LINUX_GCC_49_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-4.9:20191018"
+readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20230612"
+readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20230217"
+readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20230517"
+readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20230120"
diff --git a/abseil-cpp/ci/linux_gcc-4.9_libstdcxx_bazel.sh b/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh
similarity index 91%
rename from abseil-cpp/ci/linux_gcc-4.9_libstdcxx_bazel.sh
rename to abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh
index 8e6540c..68b3999 100755
--- a/abseil-cpp/ci/linux_gcc-4.9_libstdcxx_bazel.sh
+++ b/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2019 The Abseil Authors.
+# Copyright 2020 The Abseil Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@
 fi
 
 if [[ -z ${STD:-} ]]; then
-  STD="c++11 c++14"
+  STD="c++14"
 fi
 
 if [[ -z ${COMPILATION_MODE:-} ]]; then
@@ -37,7 +37,7 @@
 fi
 
 source "${ABSEIL_ROOT}/ci/linux_docker_containers.sh"
-readonly DOCKER_CONTAINER=${LINUX_GCC_49_CONTAINER}
+readonly DOCKER_CONTAINER=${LINUX_GCC_FLOOR_CONTAINER}
 
 # USE_BAZEL_CACHE=1 only works on Kokoro.
 # Without access to the credentials this won't work.
@@ -68,15 +68,18 @@
         --workdir=/abseil-cpp \
         --cap-add=SYS_PTRACE \
         --rm \
-        -e CC="/usr/bin/gcc-4.9" \
+        -e CC="/usr/local/bin/gcc" \
         -e BAZEL_CXXOPTS="-std=${std}" \
         ${DOCKER_EXTRA_ARGS:-} \
         ${DOCKER_CONTAINER} \
         /usr/local/bin/bazel test ... \
           --compilation_mode="${compilation_mode}" \
           --copt="${exceptions_mode}" \
+          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
           --copt=-Werror \
           --define="absl=1" \
+          --distdir="/bazel-distdir" \
+          --features=external_include_paths \
           --keep_going \
           --show_timestamps \
           --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" \
diff --git a/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh b/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh
index b327405..091acb3 100755
--- a/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh
+++ b/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh
@@ -25,7 +25,7 @@
 fi
 
 if [[ -z ${STD:-} ]]; then
-  STD="c++11 c++14 c++17 c++20"
+  STD="c++14 c++17 c++20"
 fi
 
 if [[ -z ${COMPILATION_MODE:-} ]]; then
@@ -42,7 +42,7 @@
 # USE_BAZEL_CACHE=1 only works on Kokoro.
 # Without access to the credentials this won't work.
 if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_KEYSTORE_DIR}:/keystore:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_KEYSTORE_DIR},target=/keystore,readonly ${DOCKER_EXTRA_ARGS:-}"
   # Bazel doesn't track changes to tools outside of the workspace
   # (e.g. /usr/bin/gcc), so by appending the docker container to the
   # remote_http_cache url, we make changes to the container part of
@@ -55,7 +55,7 @@
 # external dependencies first.
 # https://docs.bazel.build/versions/master/guide.html#distdir
 if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_GFILE_DIR}/distdir:/distdir:ro ${DOCKER_EXTRA_ARGS:-}"
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
   BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
 fi
 
@@ -64,7 +64,7 @@
     for exceptions_mode in ${EXCEPTIONS_MODE}; do
       echo "--------------------------------------------------------------------"
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp-ro:ro" \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp-ro,readonly \
         --tmpfs=/abseil-cpp \
         --workdir=/abseil-cpp \
         --cap-add=SYS_PTRACE \
@@ -81,8 +81,11 @@
           /usr/local/bin/bazel test ... \
             --compilation_mode=\"${compilation_mode}\" \
             --copt=\"${exceptions_mode}\" \
+            --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
             --copt=-Werror \
             --define=\"absl=1\" \
+            --distdir=\"/bazel-distdir\" \
+            --features=external_include_paths \
             --keep_going \
             --show_timestamps \
             --test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
diff --git a/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh b/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh
index 26415e2..1f72123 100755
--- a/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh
+++ b/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh
@@ -14,20 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# TODO(absl-team): This script isn't fully hermetic because
-# -DABSL_USE_GOOGLETEST_HEAD=ON means that this script isn't pinned to a fixed
-# version of GoogleTest. This means that an upstream change to GoogleTest could
-# break this test. Fix this by allowing this script to pin to a known-good
-# version of GoogleTest.
-
 set -euox pipefail
 
 if [[ -z ${ABSEIL_ROOT:-} ]]; then
   ABSEIL_ROOT="$(realpath $(dirname ${0})/..)"
 fi
 
+source "${ABSEIL_ROOT}/ci/cmake_common.sh"
+
 if [[ -z ${ABSL_CMAKE_CXX_STANDARDS:-} ]]; then
-  ABSL_CMAKE_CXX_STANDARDS="11 14 17 20"
+  ABSL_CMAKE_CXX_STANDARDS="14 17 20"
 fi
 
 if [[ -z ${ABSL_CMAKE_BUILD_TYPES:-} ]]; then
@@ -45,24 +41,25 @@
   for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do
     for build_shared in ${ABSL_CMAKE_BUILD_SHARED}; do
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp:ro" \
-        --workdir=/abseil-cpp \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
         --tmpfs=/buildfs:exec \
+        --workdir=/buildfs \
         --cap-add=SYS_PTRACE \
         --rm \
         -e CFLAGS="-Werror" \
         -e CXXFLAGS="-Werror" \
-        ${DOCKER_CONTAINER} \
+        ${DOCKER_EXTRA_ARGS:-} \
+        "${DOCKER_CONTAINER}" \
         /bin/bash -c "
-          cd /buildfs && \
           cmake /abseil-cpp \
-            -DABSL_USE_GOOGLETEST_HEAD=ON \
-            -DABSL_RUN_TESTS=ON \
+            -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
             -DBUILD_SHARED_LIBS=${build_shared} \
+            -DABSL_BUILD_TESTING=ON \
             -DCMAKE_BUILD_TYPE=${compilation_mode} \
             -DCMAKE_CXX_STANDARD=${std} \
             -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
           make -j$(nproc) && \
+          TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \
           ctest -j$(nproc) --output-on-failure"
     done
   done
diff --git a/abseil-cpp/ci/linux_gcc_alpine_cmake.sh b/abseil-cpp/ci/linux_gcc_alpine_cmake.sh
index b3b8e7a..b784456 100755
--- a/abseil-cpp/ci/linux_gcc_alpine_cmake.sh
+++ b/abseil-cpp/ci/linux_gcc_alpine_cmake.sh
@@ -14,20 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# TODO(absl-team): This script isn't fully hermetic because
-# -DABSL_USE_GOOGLETEST_HEAD=ON means that this script isn't pinned to a fixed
-# version of GoogleTest. This means that an upstream change to GoogleTest could
-# break this test. Fix this by allowing this script to pin to a known-good
-# version of GoogleTest.
-
 set -euox pipefail
 
 if [[ -z ${ABSEIL_ROOT:-} ]]; then
   ABSEIL_ROOT="$(realpath $(dirname ${0})/..)"
 fi
 
+source "${ABSEIL_ROOT}/ci/cmake_common.sh"
+
 if [[ -z ${ABSL_CMAKE_CXX_STANDARDS:-} ]]; then
-  ABSL_CMAKE_CXX_STANDARDS="11 14 17"
+  ABSL_CMAKE_CXX_STANDARDS="14 17"
 fi
 
 if [[ -z ${ABSL_CMAKE_BUILD_TYPES:-} ]]; then
@@ -45,23 +41,24 @@
   for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do
     for build_shared in ${ABSL_CMAKE_BUILD_SHARED}; do
       time docker run \
-        --volume="${ABSEIL_ROOT}:/abseil-cpp:ro" \
-        --workdir=/abseil-cpp \
+        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
         --tmpfs=/buildfs:exec \
+        --workdir=/buildfs \
         --cap-add=SYS_PTRACE \
         --rm \
         -e CFLAGS="-Werror" \
         -e CXXFLAGS="-Werror" \
+        ${DOCKER_EXTRA_ARGS:-} \
         "${DOCKER_CONTAINER}" \
         /bin/sh -c "
-          cd /buildfs && \
           cmake /abseil-cpp \
-            -DABSL_USE_GOOGLETEST_HEAD=ON \
-            -DABSL_RUN_TESTS=ON \
+            -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
+            -DABSL_BUILD_TESTING=ON \
             -DCMAKE_BUILD_TYPE=${compilation_mode} \
             -DCMAKE_CXX_STANDARD=${std} \
             -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
           make -j$(nproc) && \
+          TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \
           ctest -j$(nproc) --output-on-failure"
     done
   done
diff --git a/abseil-cpp/ci/macos_xcode_bazel.sh b/abseil-cpp/ci/macos_xcode_bazel.sh
index 738adf9..04c9a1a 100755
--- a/abseil-cpp/ci/macos_xcode_bazel.sh
+++ b/abseil-cpp/ci/macos_xcode_bazel.sh
@@ -24,7 +24,7 @@
 fi
 
 # If we are running on Kokoro, check for a versioned Bazel binary.
-KOKORO_GFILE_BAZEL_BIN="bazel-2.0.0-darwin-x86_64"
+KOKORO_GFILE_BAZEL_BIN="bazel-5.1.1-darwin-x86_64"
 if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f ${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN} ]]; then
   BAZEL_BIN="${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN}"
   chmod +x ${BAZEL_BIN}
@@ -32,6 +32,13 @@
   BAZEL_BIN="bazel"
 fi
 
+# Avoid depending on external sites like GitHub by checking --distdir for
+# external dependencies first.
+# https://docs.bazel.build/versions/master/guide.html#distdir
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
+  BAZEL_EXTRA_ARGS="--distdir=${KOKORO_GFILE_DIR}/distdir ${BAZEL_EXTRA_ARGS:-}"
+fi
+
 # Print the compiler and Bazel versions.
 echo "---------------"
 gcc -v
@@ -46,9 +53,13 @@
 fi
 
 ${BAZEL_BIN} test ... \
-  --copt=-Werror \
+  --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
+  --copt="-Werror" \
+  --cxxopt="-std=c++14" \
+  --features=external_include_paths \
   --keep_going \
   --show_timestamps \
   --test_env="TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo" \
   --test_output=errors \
-  --test_tag_filters=-benchmark
+  --test_tag_filters=-benchmark \
+  ${BAZEL_EXTRA_ARGS:-}
diff --git a/abseil-cpp/ci/macos_xcode_cmake.sh b/abseil-cpp/ci/macos_xcode_cmake.sh
index d90e273..690f86b 100755
--- a/abseil-cpp/ci/macos_xcode_cmake.sh
+++ b/abseil-cpp/ci/macos_xcode_cmake.sh
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This script is invoked on Kokoro to test Abseil on macOS.
-# It is not hermetic and may break when Kokoro is updated.
-
 set -euox pipefail
 
 if [[ -z ${ABSEIL_ROOT:-} ]]; then
@@ -24,6 +21,13 @@
 fi
 ABSEIL_ROOT=$(realpath ${ABSEIL_ROOT})
 
+source "${ABSEIL_ROOT}/ci/cmake_common.sh"
+
+# The MacOS build doesn't run in a docker container, so we have to override ABSL_GOOGLETEST_DOWNLOAD_URL.
+if [[ -r "${KOKORO_GFILE_DIR}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then
+  ABSL_GOOGLETEST_DOWNLOAD_URL="file://${KOKORO_GFILE_DIR}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip"
+fi
+
 if [[ -z ${ABSL_CMAKE_BUILD_TYPES:-} ]]; then
   ABSL_CMAKE_BUILD_TYPES="Debug"
 fi
@@ -41,12 +45,13 @@
     time cmake ${ABSEIL_ROOT} \
       -GXcode \
       -DBUILD_SHARED_LIBS=${build_shared} \
+      -DABSL_BUILD_TESTING=ON \
       -DCMAKE_BUILD_TYPE=${compilation_mode} \
-      -DCMAKE_CXX_STANDARD=11 \
+      -DCMAKE_CXX_STANDARD=14 \
       -DCMAKE_MODULE_LINKER_FLAGS="-Wl,--no-undefined" \
-      -DABSL_USE_GOOGLETEST_HEAD=ON \
-      -DABSL_RUN_TESTS=ON
+      -DABSL_GOOGLETEST_DOWNLOAD_URL="${ABSL_GOOGLETEST_DOWNLOAD_URL}"
     time cmake --build .
-    time ctest -C ${compilation_mode} --output-on-failure
+    time TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo \
+      ctest -C ${compilation_mode} --output-on-failure
   done
 done
diff --git a/abseil-cpp/ci/windows_clangcl_bazel.bat b/abseil-cpp/ci/windows_clangcl_bazel.bat
new file mode 100755
index 0000000..21230e1
--- /dev/null
+++ b/abseil-cpp/ci/windows_clangcl_bazel.bat
@@ -0,0 +1,59 @@
+:: Copyright 2023 The Abseil Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+::     https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+SETLOCAL ENABLEDELAYEDEXPANSION
+
+:: Set LLVM directory.
+SET BAZEL_LLVM=C:\Program Files\LLVM
+
+:: Change directory to the root of the project.
+CD %~dp0\..
+if %errorlevel% neq 0 EXIT /B 1
+
+:: Set the standard version, [c++14|c++17|c++20|c++latest]
+:: https://msdn.microsoft.com/en-us/library/mt490614.aspx
+:: The default is c++14 if not set on command line.
+IF "%STD%"=="" SET STD=c++14
+
+:: Set the compilation_mode (fastbuild|opt|dbg)
+:: https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode
+:: The default is fastbuild
+IF "%COMPILATION_MODE%"=="" SET COMPILATION_MODE=fastbuild
+
+:: Copy the alternate option file, if specified.
+IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h
+
+:: To upgrade Bazel, first download a new binary from
+:: https://github.com/bazelbuild/bazel/releases and copy it to
+:: /google/data/rw/teams/absl/kokoro/windows.
+::
+:: TODO(absl-team): Remove -Wno-microsoft-cast
+%KOKORO_GFILE_DIR%\bazel-5.1.1-windows-x86_64.exe ^
+  test ... ^
+  --compilation_mode=%COMPILATION_MODE% ^
+  --compiler=clang-cl ^
+  --copt=/std:%STD% ^
+  --copt=/WX ^
+  --copt=-Wno-microsoft-cast ^
+  --define=absl=1 ^
+  --distdir=%KOKORO_GFILE_DIR%\distdir ^
+  --features=external_include_paths ^
+  --keep_going ^
+  --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
+  --test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^
+  --test_output=errors ^
+  --test_tag_filters=-benchmark
+
+if %errorlevel% neq 0 EXIT /B 1
+EXIT /B 0
diff --git a/abseil-cpp/ci/windows_msvc_bazel.bat b/abseil-cpp/ci/windows_msvc_bazel.bat
new file mode 100755
index 0000000..11d9f35
--- /dev/null
+++ b/abseil-cpp/ci/windows_msvc_bazel.bat
@@ -0,0 +1,52 @@
+:: Copyright 2023 The Abseil Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+::     https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+SETLOCAL ENABLEDELAYEDEXPANSION
+
+:: Change directory to the root of the project.
+CD %~dp0\..
+if %errorlevel% neq 0 EXIT /B 1
+
+:: Set the standard version, [c++14|c++latest]
+:: https://msdn.microsoft.com/en-us/library/mt490614.aspx
+:: The default is c++14 if not set on the command line.
+IF "%STD%"=="" SET STD=c++14
+
+:: Set the compilation_mode (fastbuild|opt|dbg)
+:: https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode
+:: The default is fastbuild
+IF "%COMPILATION_MODE%"=="" SET COMPILATION_MODE=fastbuild
+
+:: Copy the alternate option file, if specified.
+IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h
+
+:: To upgrade Bazel, first download a new binary from
+:: https://github.com/bazelbuild/bazel/releases and copy it to
+:: /google/data/rw/teams/absl/kokoro/windows.
+%KOKORO_GFILE_DIR%\bazel-5.1.1-windows-x86_64.exe ^
+  test ... ^
+  --compilation_mode=%COMPILATION_MODE% ^
+  --copt=/WX ^
+  --copt=/std:%STD% ^
+  --define=absl=1 ^
+  --distdir=%KOKORO_GFILE_DIR%\distdir ^
+  --features=external_include_paths ^
+  --keep_going ^
+  --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
+  --test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^
+  --test_output=errors ^
+  --test_tag_filters=-benchmark
+
+if %errorlevel% neq 0 EXIT /B 1
+EXIT /B 0
diff --git a/abseil-cpp/ci/windows_msvc_cmake.bat b/abseil-cpp/ci/windows_msvc_cmake.bat
new file mode 100755
index 0000000..743b6f7
--- /dev/null
+++ b/abseil-cpp/ci/windows_msvc_cmake.bat
@@ -0,0 +1,69 @@
+:: Copyright 2023 The Abseil Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+::     https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+SETLOCAL ENABLEDELAYEDEXPANSION
+
+:: Use GoogleTest v1.14.0
+SET ABSL_GOOGLETEST_COMMIT=f8d7d77c06936315286eb55f8de22cd23c188571
+
+IF EXIST %KOKORO_GFILE_DIR%\distdir\%ABSL_GOOGLETEST_COMMIT%.zip (
+  SET ABSL_GOOGLETEST_DOWNLOAD_URL=file://%KOKORO_GFILE_DIR%\distdir\%ABSL_GOOGLETEST_COMMIT%.zip
+) ELSE (
+  SET ABSL_GOOGLETEST_DOWNLOAD_URL=https://github.com/google/googletest/archive/%ABSL_GOOGLETEST_COMMIT%.zip
+)
+
+:: Replace '\' with '/' in Windows paths for CMake.
+:: Note that this cannot go inside the IF block above: %VAR% references inside a parenthesized block are expanded when the block is parsed, not when it runs.
+SET ABSL_GOOGLETEST_DOWNLOAD_URL=%ABSL_GOOGLETEST_DOWNLOAD_URL:\=/%
+
+IF EXIST "C:\Program Files\CMake\bin\" (
+  SET CMAKE_BIN="C:\Program Files\CMake\bin\cmake.exe"
+  SET CTEST_BIN="C:\Program Files\CMake\bin\ctest.exe"
+) ELSE (
+  SET CMAKE_BIN="cmake.exe"
+  SET CTEST_BIN="ctest.exe"
+)
+
+SET CTEST_OUTPUT_ON_FAILURE=1
+SET CMAKE_BUILD_PARALLEL_LEVEL=16
+SET CTEST_PARALLEL_LEVEL=16
+
+:: Change directory to the root of the project.
+CD %~dp0\..
+if %errorlevel% neq 0 EXIT /B 1
+
+SET TZDIR=%CD%\absl\time\internal\cctz\testdata\zoneinfo
+
+MKDIR "build"
+CD "build"
+
+SET CXXFLAGS="/WX"
+
+%CMAKE_BIN% ^
+  -DABSL_BUILD_TEST_HELPERS=ON ^
+  -DABSL_BUILD_TESTING=ON ^
+  -DABSL_GOOGLETEST_DOWNLOAD_URL=%ABSL_GOOGLETEST_DOWNLOAD_URL% ^
+  -DBUILD_SHARED_LIBS=%ABSL_CMAKE_BUILD_SHARED% ^
+  -DCMAKE_CXX_STANDARD=%ABSL_CMAKE_CXX_STANDARD% ^
+  -G "%ABSL_CMAKE_GENERATOR%" ^
+  ..
+IF %errorlevel% neq 0 EXIT /B 1
+
+%CMAKE_BIN% --build . --target ALL_BUILD --config %ABSL_CMAKE_BUILD_TYPE%
+IF %errorlevel% neq 0 EXIT /B 1
+
+%CTEST_BIN% -C %ABSL_CMAKE_BUILD_TYPE% -E "absl_lifetime_test|absl_symbolize_test"
+IF %errorlevel% neq 0 EXIT /B 1
+
+EXIT /B 0
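
windows_msvc_cmake.bat above chooses its GoogleTest source the same way the
macOS script does: prefer an archive Kokoro has already staged under distdir,
consumed through a file:// URL, and otherwise fall back to downloading the
pinned commit from GitHub. A minimal Python sketch of that selection, with
illustrative names (googletest_download_url, distdir) that do not appear in
these scripts:

    import os

    def googletest_download_url(commit, distdir):
        # Prefer an archive that CI has already staged (e.g. under
        # %KOKORO_GFILE_DIR%\distdir) so the job avoids the network.
        local_zip = os.path.join(distdir, commit + ".zip")
        if os.path.isfile(local_zip):
            # CMake expects forward slashes, hence the replacement on
            # Windows-style paths.
            return "file://" + local_zip.replace("\\", "/")
        # Otherwise download the pinned commit directly from GitHub.
        return "https://github.com/google/googletest/archive/{}.zip".format(commit)
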
diff --git a/abseil-cpp/conanfile.py b/abseil-cpp/conanfile.py
old mode 100644
new mode 100755
index 926ec5c..4bbc62e
--- a/abseil-cpp/conanfile.py
+++ b/abseil-cpp/conanfile.py
@@ -30,7 +30,7 @@
             raise ConanInvalidConfiguration("Abseil does not support MSVC < 14")
 
     def build(self):
-        tools.replace_in_file("CMakeLists.txt", "project(absl CXX)", "project(absl CXX)\ninclude(conanbuildinfo.cmake)\nconan_basic_setup()")
+        tools.replace_in_file("CMakeLists.txt", "project(absl LANGUAGES CXX)", "project(absl LANGUAGES CXX)\ninclude(conanbuildinfo.cmake)\nconan_basic_setup()")
         cmake = CMake(self)
         cmake.definitions["BUILD_TESTING"] = False
         cmake.configure()
diff --git a/abseil-cpp/create_lts.py b/abseil-cpp/create_lts.py
new file mode 100755
index 0000000..642b884
--- /dev/null
+++ b/abseil-cpp/create_lts.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A script to do source transformations to create a new LTS release.
+
+   Usage: ./create_lts.py YYYYMMDD
+"""
+
+import sys
+
+
+def ReplaceStringsInFile(filename, replacement_dict):
+  """Performs textual replacements in a file.
+
+  Rewrites filename with the keys in replacement_dict replaced with
+  their values. This function assumes the file can fit in memory.
+
+  Args:
+    filename: the filename to perform the replacement on
+    replacement_dict: a dictionary of key strings to be replaced with their
+      values
+
+  Raises:
+    Exception: A failure occurred
+  """
+  f = open(filename, 'r')
+  content = f.read()
+  f.close()
+
+  for key, value in replacement_dict.items():
+    original = content
+    content = content.replace(key, value)
+    if content == original:
+      raise Exception('Failed to find {} in {}'.format(key, filename))
+
+  f = open(filename, 'w')
+  f.write(content)
+  f.close()
+
+
+def StripContentBetweenTags(filename, strip_begin_tag, strip_end_tag):
+  """Strip contents from a file.
+
+  Rewrites filename by removing all content between
+  strip_begin_tag and strip_end_tag, including the tags themselves.
+
+  Args:
+    filename: the filename to perform the replacement on
+    strip_begin_tag: the start of the content to be removed
+    strip_end_tag: the end of the content to be removed
+
+  Raises:
+    Exception: A failure occurred
+  """
+  f = open(filename, 'r')
+  content = f.read()
+  f.close()
+
+  while True:
+    begin = content.find(strip_begin_tag)
+    if begin == -1:
+      break
+    end = content.find(strip_end_tag, begin + len(strip_begin_tag))
+    if end == -1:
+      raise Exception('{}: imbalanced strip begin ({}) and '
+                      'end ({}) tags'.format(filename, strip_begin_tag,
+                                             strip_end_tag))
+    content = content.replace(content[begin:end + len(strip_end_tag)], '')
+
+  f = open(filename, 'w')
+  f.write(content)
+  f.close()
+
+
+def main(argv):
+  if len(argv) != 2:
+    print('Usage: {} YYYYMMDD'.format(sys.argv[0]), file=sys.stderr)
+    sys.exit(1)
+
+  datestamp = sys.argv[1]
+  if len(datestamp) != 8 or not datestamp.isdigit():
+    raise Exception(
+        'datestamp={} is not in the YYYYMMDD format'.format(datestamp))
+
+  # Replacement directives go here.
+  ReplaceStringsInFile(
+      'absl/base/config.h', {
+          '#undef ABSL_LTS_RELEASE_VERSION':
+              '#define ABSL_LTS_RELEASE_VERSION {}'.format(datestamp),
+          '#undef ABSL_LTS_RELEASE_PATCH_LEVEL':
+              '#define ABSL_LTS_RELEASE_PATCH_LEVEL 0'
+      })
+  ReplaceStringsInFile(
+      'absl/base/options.h', {
+          '#define ABSL_OPTION_USE_INLINE_NAMESPACE 0':
+              '#define ABSL_OPTION_USE_INLINE_NAMESPACE 1',
+          '#define ABSL_OPTION_INLINE_NAMESPACE_NAME head':
+              '#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_{}'.format(
+                  datestamp)
+      })
+  ReplaceStringsInFile(
+      'CMakeLists.txt', {
+          'project(absl LANGUAGES CXX)':
+              'project(absl LANGUAGES CXX VERSION {})'.format(datestamp)
+      })
+  # Set the SOVERSION to YYMM.0.0 - the first 0 indicates that only
+  # ABI-compatible changes have been made, and the second 0 can be
+  # incremented for ABI-compatible patch releases.  Note that we only
+  # use the last two digits of the year and the month because the macOS
+  # linker requires the first part of the SOVERSION to fit into
+  # 16 bits.
+  # https://www.sicpers.info/2013/03/how-to-version-a-mach-o-library/
+  ReplaceStringsInFile(
+      'CMake/AbseilHelpers.cmake',
+      {'SOVERSION 0': 'SOVERSION "{}.0.0"'.format(datestamp[2:6])})
+  StripContentBetweenTags('CMakeLists.txt', '# absl:lts-remove-begin',
+                          '# absl:lts-remove-end')
+
+
+if __name__ == '__main__':
+  main(sys.argv)
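
To make the effect of these substitutions concrete, the snippet below derives
the values create_lts.py would write for a hypothetical datestamp (the date is
an illustrative example, not one shipped with this change):

    # Derivations only; create_lts.py itself rewrites the files in place.
    datestamp = "20230802"  # hypothetical YYYYMMDD argument
    assert len(datestamp) == 8 and datestamp.isdigit()

    lts_release_version = datestamp                # ABSL_LTS_RELEASE_VERSION 20230802
    inline_namespace = "lts_" + datestamp          # ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20230802
    soversion = "{}.0.0".format(datestamp[2:6])    # SOVERSION "2308.0.0"; YYMM fits in 16 bits

    print(lts_release_version, inline_namespace, soversion)
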